author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /drivers/gpu/drm/amd/display/amdgpu_dm
parent    Initial commit. (diff)
download  linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
          linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/amd/display/amdgpu_dm')
29 files changed, 26493 insertions, 0 deletions
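The diff opens with the subcomponent Makefile, which builds up the dm object list conditionally and then appends it to the display tree's aggregate file list. A minimal sketch of that kbuild pattern, using hypothetical CONFIG_ symbols, file names, and variables (MYDRV, MYDRV_PATH, DRIVER_FILES are stand-ins, not names from the commit):

# Kconfig exports each enabled symbol as a make variable (y or m),
# so both conditional forms below key off that.
MYDRV = core.o irq.o                  # objects that are always built

ifdef CONFIG_MYDRV_FP                 # symbol set => add FP helpers
MYDRV += fpu.o
endif

ifneq ($(CONFIG_MYDRV_EXTRA),)        # non-empty (y or m) => add helpers
MYDRV += helpers.o
endif

# Prefix each object with its directory, then hand the list to the
# aggregate variable the parent Makefile links into the module.
MYDRV_OBJS = $(addprefix $(MYDRV_PATH)/,$(MYDRV))
DRIVER_FILES += $(MYDRV_OBJS)

In the real Makefile below, AMDGPUDM plays the role of MYDRV, FULL_AMD_DISPLAY_PATH and AMDDALPATH supply the include and object prefixes, and AMD_DISPLAY_FILES is the aggregate list.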
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile new file mode 100644 index 0000000000..8bf94920d2 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/Makefile @@ -0,0 +1,54 @@ +# +# Copyright 2017 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. +# +# +# Makefile for the 'dm' sub-component of DAL. +# It provides the control and status of dm blocks. + + + +AMDGPUDM = \ + amdgpu_dm.o \ + amdgpu_dm_plane.o \ + amdgpu_dm_crtc.o \ + amdgpu_dm_irq.o \ + amdgpu_dm_mst_types.o \ + amdgpu_dm_color.o + +ifdef CONFIG_DRM_AMD_DC_FP +AMDGPUDM += dc_fpu.o +endif + +ifneq ($(CONFIG_DRM_AMD_DC),) +AMDGPUDM += amdgpu_dm_services.o amdgpu_dm_helpers.o amdgpu_dm_pp_smu.o amdgpu_dm_psr.o amdgpu_dm_replay.o +endif + +AMDGPUDM += amdgpu_dm_hdcp.o + +ifneq ($(CONFIG_DEBUG_FS),) +AMDGPUDM += amdgpu_dm_crc.o amdgpu_dm_debugfs.o +endif + +subdir-ccflags-y += -I$(FULL_AMD_DISPLAY_PATH)/dc + +AMDGPU_DM = $(addprefix $(AMDDALPATH)/amdgpu_dm/,$(AMDGPUDM)) + +AMD_DISPLAY_FILES += $(AMDGPU_DM) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c new file mode 100644 index 0000000000..861b5e45e2 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -0,0 +1,11043 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +/* The caprices of the preprocessor require that this be declared right here */ +#define CREATE_TRACE_POINTS + +#include "dm_services_types.h" +#include "dc.h" +#include "link_enc_cfg.h" +#include "dc/inc/core_types.h" +#include "dal_asic_id.h" +#include "dmub/dmub_srv.h" +#include "dc/inc/hw/dmcu.h" +#include "dc/inc/hw/abm.h" +#include "dc/dc_dmub_srv.h" +#include "dc/dc_edid_parser.h" +#include "dc/dc_stat.h" +#include "amdgpu_dm_trace.h" +#include "dpcd_defs.h" +#include "link/protocols/link_dpcd.h" +#include "link_service_types.h" +#include "link/protocols/link_dp_capability.h" +#include "link/protocols/link_ddc.h" + +#include "vid.h" +#include "amdgpu.h" +#include "amdgpu_display.h" +#include "amdgpu_ucode.h" +#include "atom.h" +#include "amdgpu_dm.h" +#include "amdgpu_dm_plane.h" +#include "amdgpu_dm_crtc.h" +#include "amdgpu_dm_hdcp.h" +#include <drm/display/drm_hdcp_helper.h> +#include "amdgpu_pm.h" +#include "amdgpu_atombios.h" + +#include "amd_shared.h" +#include "amdgpu_dm_irq.h" +#include "dm_helpers.h" +#include "amdgpu_dm_mst_types.h" +#if defined(CONFIG_DEBUG_FS) +#include "amdgpu_dm_debugfs.h" +#endif +#include "amdgpu_dm_psr.h" +#include "amdgpu_dm_replay.h" + +#include "ivsrcid/ivsrcid_vislands30.h" + +#include <linux/backlight.h> +#include <linux/module.h> +#include <linux/moduleparam.h> +#include <linux/types.h> +#include <linux/pm_runtime.h> +#include <linux/pci.h> +#include <linux/firmware.h> +#include <linux/component.h> +#include <linux/dmi.h> + +#include <drm/display/drm_dp_mst_helper.h> +#include <drm/display/drm_hdmi_helper.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_uapi.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_blend.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_edid.h> +#include <drm/drm_vblank.h> +#include <drm/drm_audio_component.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_plane_helper.h> + +#include <acpi/video.h> + +#include "ivsrcid/dcn/irqsrcs_dcn_1_0.h" + +#include "dcn/dcn_1_0_offset.h" +#include "dcn/dcn_1_0_sh_mask.h" +#include "soc15_hw_ip.h" +#include "soc15_common.h" +#include "vega10_ip_offset.h" + +#include "gc/gc_11_0_0_offset.h" +#include "gc/gc_11_0_0_sh_mask.h" + +#include "modules/inc/mod_freesync.h" +#include "modules/power/power_helpers.h" + +#define FIRMWARE_RENOIR_DMUB "amdgpu/renoir_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB); +#define FIRMWARE_SIENNA_CICHLID_DMUB "amdgpu/sienna_cichlid_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_SIENNA_CICHLID_DMUB); +#define FIRMWARE_NAVY_FLOUNDER_DMUB "amdgpu/navy_flounder_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_NAVY_FLOUNDER_DMUB); +#define FIRMWARE_GREEN_SARDINE_DMUB "amdgpu/green_sardine_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_GREEN_SARDINE_DMUB); +#define FIRMWARE_VANGOGH_DMUB "amdgpu/vangogh_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_VANGOGH_DMUB); +#define FIRMWARE_DIMGREY_CAVEFISH_DMUB "amdgpu/dimgrey_cavefish_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DIMGREY_CAVEFISH_DMUB); +#define FIRMWARE_BEIGE_GOBY_DMUB "amdgpu/beige_goby_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_BEIGE_GOBY_DMUB); +#define FIRMWARE_YELLOW_CARP_DMUB "amdgpu/yellow_carp_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_YELLOW_CARP_DMUB); +#define FIRMWARE_DCN_314_DMUB "amdgpu/dcn_3_1_4_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_314_DMUB); +#define FIRMWARE_DCN_315_DMUB "amdgpu/dcn_3_1_5_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_315_DMUB); +#define FIRMWARE_DCN316_DMUB "amdgpu/dcn_3_1_6_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN316_DMUB); + +#define FIRMWARE_DCN_V3_2_0_DMCUB 
"amdgpu/dcn_3_2_0_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_0_DMCUB); +#define FIRMWARE_DCN_V3_2_1_DMCUB "amdgpu/dcn_3_2_1_dmcub.bin" +MODULE_FIRMWARE(FIRMWARE_DCN_V3_2_1_DMCUB); + +#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin" +MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU); + +#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin" +MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU); + +/* Number of bytes in PSP header for firmware. */ +#define PSP_HEADER_BYTES 0x100 + +/* Number of bytes in PSP footer for firmware. */ +#define PSP_FOOTER_BYTES 0x100 + +/** + * DOC: overview + * + * The AMDgpu display manager, **amdgpu_dm** (or even simpler, + * **dm**) sits between DRM and DC. It acts as a liaison, converting DRM + * requests into DC requests, and DC responses into DRM responses. + * + * The root control structure is &struct amdgpu_display_manager. + */ + +/* basic init/fini API */ +static int amdgpu_dm_init(struct amdgpu_device *adev); +static void amdgpu_dm_fini(struct amdgpu_device *adev); +static bool is_freesync_video_mode(const struct drm_display_mode *mode, struct amdgpu_dm_connector *aconnector); + +static enum drm_mode_subconnector get_subconnector_type(struct dc_link *link) +{ + switch (link->dpcd_caps.dongle_type) { + case DISPLAY_DONGLE_NONE: + return DRM_MODE_SUBCONNECTOR_Native; + case DISPLAY_DONGLE_DP_VGA_CONVERTER: + return DRM_MODE_SUBCONNECTOR_VGA; + case DISPLAY_DONGLE_DP_DVI_CONVERTER: + case DISPLAY_DONGLE_DP_DVI_DONGLE: + return DRM_MODE_SUBCONNECTOR_DVID; + case DISPLAY_DONGLE_DP_HDMI_CONVERTER: + case DISPLAY_DONGLE_DP_HDMI_DONGLE: + return DRM_MODE_SUBCONNECTOR_HDMIA; + case DISPLAY_DONGLE_DP_HDMI_MISMATCHED_DONGLE: + default: + return DRM_MODE_SUBCONNECTOR_Unknown; + } +} + +static void update_subconnector_property(struct amdgpu_dm_connector *aconnector) +{ + struct dc_link *link = aconnector->dc_link; + struct drm_connector *connector = &aconnector->base; + enum drm_mode_subconnector subconnector = DRM_MODE_SUBCONNECTOR_Unknown; + + if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) + return; + + if (aconnector->dc_sink) + subconnector = get_subconnector_type(link); + + drm_object_property_set_value(&connector->base, + connector->dev->mode_config.dp_subconnector_property, + subconnector); +} + +/* + * initializes drm_device display related structures, based on the information + * provided by DAL. 
The drm structures are: drm_crtc, drm_connector, + * drm_encoder, drm_mode_config + * + * Returns 0 on success + */ +static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev); +/* removes and deallocates the drm structures, created by the above function */ +static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm); + +static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, + struct amdgpu_dm_connector *amdgpu_dm_connector, + u32 link_index, + struct amdgpu_encoder *amdgpu_encoder); +static int amdgpu_dm_encoder_init(struct drm_device *dev, + struct amdgpu_encoder *aencoder, + uint32_t link_index); + +static int amdgpu_dm_connector_get_modes(struct drm_connector *connector); + +static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state); + +static int amdgpu_dm_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state); + +static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector); +static void handle_hpd_rx_irq(void *param); + +static bool +is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state, + struct drm_crtc_state *new_crtc_state); +/* + * dm_vblank_get_counter + * + * @brief + * Get counter for number of vertical blanks + * + * @param + * struct amdgpu_device *adev - [in] desired amdgpu device + * int crtc - [in] which CRTC to get the counter from + * + * @return + * Counter for vertical blanks + */ +static u32 dm_vblank_get_counter(struct amdgpu_device *adev, int crtc) +{ + struct amdgpu_crtc *acrtc = NULL; + + if (crtc >= adev->mode_info.num_crtc) + return 0; + + acrtc = adev->mode_info.crtcs[crtc]; + + if (!acrtc->dm_irq_params.stream) { + DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", + crtc); + return 0; + } + + return dc_stream_get_vblank_counter(acrtc->dm_irq_params.stream); +} + +static int dm_crtc_get_scanoutpos(struct amdgpu_device *adev, int crtc, + u32 *vbl, u32 *position) +{ + u32 v_blank_start, v_blank_end, h_position, v_position; + struct amdgpu_crtc *acrtc = NULL; + + if ((crtc < 0) || (crtc >= adev->mode_info.num_crtc)) + return -EINVAL; + + acrtc = adev->mode_info.crtcs[crtc]; + + if (!acrtc->dm_irq_params.stream) { + DRM_ERROR("dc_stream_state is NULL for crtc '%d'!\n", + crtc); + return 0; + } + + /* + * TODO rework base driver to use values directly.
+ * for now parse it back into reg-format + */ + dc_stream_get_scanoutpos(acrtc->dm_irq_params.stream, + &v_blank_start, + &v_blank_end, + &h_position, + &v_position); + + *position = v_position | (h_position << 16); + *vbl = v_blank_start | (v_blank_end << 16); + + return 0; +} + +static bool dm_is_idle(void *handle) +{ + /* XXX todo */ + return true; +} + +static int dm_wait_for_idle(void *handle) +{ + /* XXX todo */ + return 0; +} + +static bool dm_check_soft_reset(void *handle) +{ + return false; +} + +static int dm_soft_reset(void *handle) +{ + /* XXX todo */ + return 0; +} + +static struct amdgpu_crtc * +get_crtc_by_otg_inst(struct amdgpu_device *adev, + int otg_inst) +{ + struct drm_device *dev = adev_to_drm(adev); + struct drm_crtc *crtc; + struct amdgpu_crtc *amdgpu_crtc; + + if (WARN_ON(otg_inst == -1)) + return adev->mode_info.crtcs[0]; + + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + amdgpu_crtc = to_amdgpu_crtc(crtc); + + if (amdgpu_crtc->otg_inst == otg_inst) + return amdgpu_crtc; + } + + return NULL; +} + +static inline bool is_dc_timing_adjust_needed(struct dm_crtc_state *old_state, + struct dm_crtc_state *new_state) +{ + if (new_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) + return true; + else if (amdgpu_dm_crtc_vrr_active(old_state) != amdgpu_dm_crtc_vrr_active(new_state)) + return true; + else + return false; +} + +static inline void reverse_planes_order(struct dc_surface_update *array_of_surface_update, + int planes_count) +{ + int i, j; + + for (i = 0, j = planes_count - 1; i < j; i++, j--) + swap(array_of_surface_update[i], array_of_surface_update[j]); +} + +/** + * update_planes_and_stream_adapter() - Send planes to be updated in DC + * + * DC has a generic way to update planes and stream via + * dc_update_planes_and_stream function; however, DM might need some + * adjustments and preparation before calling it. This function is a wrapper + * for the dc_update_planes_and_stream that does any required configuration + * before passing control to DC. + * + * @dc: Display Core control structure + * @update_type: specify whether it is FULL/MEDIUM/FAST update + * @planes_count: planes count to update + * @stream: stream state + * @stream_update: stream update + * @array_of_surface_update: dc surface update pointer + * + */ +static inline bool update_planes_and_stream_adapter(struct dc *dc, + int update_type, + int planes_count, + struct dc_stream_state *stream, + struct dc_stream_update *stream_update, + struct dc_surface_update *array_of_surface_update) +{ + reverse_planes_order(array_of_surface_update, planes_count); + + /* + * Previous frame finished and HW is ready for optimization. + */ + if (update_type == UPDATE_TYPE_FAST) + dc_post_update_surfaces_to_stream(dc); + + return dc_update_planes_and_stream(dc, + array_of_surface_update, + planes_count, + stream, + stream_update); +} + +/** + * dm_pflip_high_irq() - Handle pageflip interrupt + * @interrupt_params: ignored + * + * Handles the pageflip interrupt by notifying all interested parties + * that the pageflip has been completed. 
+ */ +static void dm_pflip_high_irq(void *interrupt_params) +{ + struct amdgpu_crtc *amdgpu_crtc; + struct common_irq_params *irq_params = interrupt_params; + struct amdgpu_device *adev = irq_params->adev; + unsigned long flags; + struct drm_pending_vblank_event *e; + u32 vpos, hpos, v_blank_start, v_blank_end; + bool vrr_active; + + amdgpu_crtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_PFLIP); + + /* IRQ could occur when in initial stage */ + /* TODO work and BO cleanup */ + if (amdgpu_crtc == NULL) { + DC_LOG_PFLIP("CRTC is null, returning.\n"); + return; + } + + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); + + if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { + DC_LOG_PFLIP("amdgpu_crtc->pflip_status = %d !=AMDGPU_FLIP_SUBMITTED(%d) on crtc:%d[%p]\n", + amdgpu_crtc->pflip_status, + AMDGPU_FLIP_SUBMITTED, + amdgpu_crtc->crtc_id, + amdgpu_crtc); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); + return; + } + + /* page flip completed. */ + e = amdgpu_crtc->event; + amdgpu_crtc->event = NULL; + + WARN_ON(!e); + + vrr_active = amdgpu_dm_crtc_vrr_active_irq(amdgpu_crtc); + + /* Fixed refresh rate, or VRR scanout position outside front-porch? */ + if (!vrr_active || + !dc_stream_get_scanoutpos(amdgpu_crtc->dm_irq_params.stream, &v_blank_start, + &v_blank_end, &hpos, &vpos) || + (vpos < v_blank_start)) { + /* Update to correct count and vblank timestamp if racing with + * vblank irq. This also updates to the correct vblank timestamp + * even in VRR mode, as scanout is past the front-porch atm. + */ + drm_crtc_accurate_vblank_count(&amdgpu_crtc->base); + + /* Wake up userspace by sending the pageflip event with proper + * count and timestamp of vblank of flip completion. + */ + if (e) { + drm_crtc_send_vblank_event(&amdgpu_crtc->base, e); + + /* Event sent, so done with vblank for this flip */ + drm_crtc_vblank_put(&amdgpu_crtc->base); + } + } else if (e) { + /* VRR active and inside front-porch: vblank count and + * timestamp for pageflip event will only be up to date after + * drm_crtc_handle_vblank() has been executed from late vblank + * irq handler after start of back-porch (vline 0). We queue the + * pageflip event for send-out by drm_crtc_handle_vblank() with + * updated timestamp and count, once it runs after us. + * + * We need to open-code this instead of using the helper + * drm_crtc_arm_vblank_event(), as that helper would + * call drm_crtc_accurate_vblank_count(), which we must + * not call in VRR mode while we are in front-porch! + */ + + /* sequence will be replaced by real count during send-out. */ + e->sequence = drm_crtc_vblank_count(&amdgpu_crtc->base); + e->pipe = amdgpu_crtc->crtc_id; + + list_add_tail(&e->base.link, &adev_to_drm(adev)->vblank_event_list); + e = NULL; + } + + /* Keep track of vblank of this flip for flip throttling. We use the + * cooked hw counter, as that one incremented at start of this vblank + * of pageflip completion, so last_flip_vblank is the forbidden count + * for queueing new pageflips if vsync + VRR is enabled. 
+ */ + amdgpu_crtc->dm_irq_params.last_flip_vblank = + amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base); + + amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE; + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); + + DC_LOG_PFLIP("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_NONE, vrr[%d]-fp %d\n", + amdgpu_crtc->crtc_id, amdgpu_crtc, + vrr_active, (int) !e); +} + +static void dm_vupdate_high_irq(void *interrupt_params) +{ + struct common_irq_params *irq_params = interrupt_params; + struct amdgpu_device *adev = irq_params->adev; + struct amdgpu_crtc *acrtc; + struct drm_device *drm_dev; + struct drm_vblank_crtc *vblank; + ktime_t frame_duration_ns, previous_timestamp; + unsigned long flags; + int vrr_active; + + acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VUPDATE); + + if (acrtc) { + vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc); + drm_dev = acrtc->base.dev; + vblank = &drm_dev->vblank[acrtc->base.index]; + previous_timestamp = atomic64_read(&irq_params->previous_timestamp); + frame_duration_ns = vblank->time - previous_timestamp; + + if (frame_duration_ns > 0) { + trace_amdgpu_refresh_rate_track(acrtc->base.index, + frame_duration_ns, + ktime_divns(NSEC_PER_SEC, frame_duration_ns)); + atomic64_set(&irq_params->previous_timestamp, vblank->time); + } + + DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d\n", + acrtc->crtc_id, + vrr_active); + + /* Core vblank handling is done here after end of front-porch in + * vrr mode, as vblank timestamping will give valid results + * while now done after front-porch. This will also deliver + * page-flip completion events that have been queued to us + * if a pageflip happened inside front-porch. + */ + if (vrr_active) { + amdgpu_dm_crtc_handle_vblank(acrtc); + + /* BTR processing for pre-DCE12 ASICs */ + if (acrtc->dm_irq_params.stream && + adev->family < AMDGPU_FAMILY_AI) { + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); + mod_freesync_handle_v_update( + adev->dm.freesync_module, + acrtc->dm_irq_params.stream, + &acrtc->dm_irq_params.vrr_params); + + dc_stream_adjust_vmin_vmax( + adev->dm.dc, + acrtc->dm_irq_params.stream, + &acrtc->dm_irq_params.vrr_params.adjust); + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); + } + } + } +} + +/** + * dm_crtc_high_irq() - Handles CRTC interrupt + * @interrupt_params: used for determining the CRTC instance + * + * Handles the CRTC/VSYNC interrupt by notifying DRM's VBLANK + * event handler. + */ +static void dm_crtc_high_irq(void *interrupt_params) +{ + struct common_irq_params *irq_params = interrupt_params; + struct amdgpu_device *adev = irq_params->adev; + struct amdgpu_crtc *acrtc; + unsigned long flags; + int vrr_active; + + acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VBLANK); + if (!acrtc) + return; + + vrr_active = amdgpu_dm_crtc_vrr_active_irq(acrtc); + + DC_LOG_VBLANK("crtc:%d, vupdate-vrr:%d, planes:%d\n", acrtc->crtc_id, + vrr_active, acrtc->dm_irq_params.active_planes); + + /** + * Core vblank handling at start of front-porch is only possible + * in non-vrr mode, as only there vblank timestamping will give + * valid results while done in front-porch. Otherwise defer it + * to dm_vupdate_high_irq after end of front-porch. + */ + if (!vrr_active) + amdgpu_dm_crtc_handle_vblank(acrtc); + + /** + * Following stuff must happen at start of vblank, for crc + * computation and below-the-range btr support in vrr mode. + */ + amdgpu_dm_crtc_handle_crc_irq(&acrtc->base); + + /* BTR updates need to happen before VUPDATE on Vega and above.
*/ + if (adev->family < AMDGPU_FAMILY_AI) + return; + + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); + + if (acrtc->dm_irq_params.stream && + acrtc->dm_irq_params.vrr_params.supported && + acrtc->dm_irq_params.freesync_config.state == + VRR_STATE_ACTIVE_VARIABLE) { + mod_freesync_handle_v_update(adev->dm.freesync_module, + acrtc->dm_irq_params.stream, + &acrtc->dm_irq_params.vrr_params); + + dc_stream_adjust_vmin_vmax(adev->dm.dc, acrtc->dm_irq_params.stream, + &acrtc->dm_irq_params.vrr_params.adjust); + } + + /* + * If there aren't any active_planes then DCH HUBP may be clock-gated. + * In that case, pageflip completion interrupts won't fire and pageflip + * completion events won't get delivered. Prevent this by sending + * pending pageflip events from here if a flip is still pending. + * + * If any planes are enabled, use dm_pflip_high_irq() instead, to + * avoid race conditions between flip programming and completion, + * which could cause too early flip completion events. + */ + if (adev->family >= AMDGPU_FAMILY_RV && + acrtc->pflip_status == AMDGPU_FLIP_SUBMITTED && + acrtc->dm_irq_params.active_planes == 0) { + if (acrtc->event) { + drm_crtc_send_vblank_event(&acrtc->base, acrtc->event); + acrtc->event = NULL; + drm_crtc_vblank_put(&acrtc->base); + } + acrtc->pflip_status = AMDGPU_FLIP_NONE; + } + + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); +} + +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) +/** + * dm_dcn_vertical_interrupt0_high_irq() - Handles OTG Vertical interrupt0 for + * DCN generation ASICs + * @interrupt_params: interrupt parameters + * + * Used to set crc window/read out crc value at vertical line 0 position + */ +static void dm_dcn_vertical_interrupt0_high_irq(void *interrupt_params) +{ + struct common_irq_params *irq_params = interrupt_params; + struct amdgpu_device *adev = irq_params->adev; + struct amdgpu_crtc *acrtc; + + acrtc = get_crtc_by_otg_inst(adev, irq_params->irq_src - IRQ_TYPE_VLINE0); + + if (!acrtc) + return; + + amdgpu_dm_crtc_handle_crc_window_irq(&acrtc->base); +} +#endif /* CONFIG_DRM_AMD_SECURE_DISPLAY */ + +/** + * dmub_aux_setconfig_callback - Callback for AUX or SET_CONFIG command. + * @adev: amdgpu_device pointer + * @notify: dmub notification structure + * + * Dmub AUX or SET_CONFIG command completion processing callback. + * Copies dmub notification to DM which is to be read by the AUX command + * issuing thread, and also signals the event to wake up the thread. + */ +static void dmub_aux_setconfig_callback(struct amdgpu_device *adev, + struct dmub_notification *notify) +{ + if (adev->dm.dmub_notify) + memcpy(adev->dm.dmub_notify, notify, sizeof(struct dmub_notification)); + if (notify->type == DMUB_NOTIFICATION_AUX_REPLY) + complete(&adev->dm.dmub_aux_transfer_done); +} + +/** + * dmub_hpd_callback - DMUB HPD interrupt processing callback. + * @adev: amdgpu_device pointer + * @notify: dmub notification structure + * + * Dmub Hpd interrupt processing callback. Gets the display index through the + * link index and calls the helper to do the processing.
+ */ +static void dmub_hpd_callback(struct amdgpu_device *adev, + struct dmub_notification *notify) +{ + struct amdgpu_dm_connector *aconnector; + struct amdgpu_dm_connector *hpd_aconnector = NULL; + struct drm_connector *connector; + struct drm_connector_list_iter iter; + struct dc_link *link; + u8 link_index = 0; + struct drm_device *dev; + + if (adev == NULL) + return; + + if (notify == NULL) { + DRM_ERROR("DMUB HPD callback notification was NULL"); + return; + } + + if (notify->link_index > adev->dm.dc->link_count) { + DRM_ERROR("DMUB HPD index (%u) is abnormal", notify->link_index); + return; + } + + link_index = notify->link_index; + link = adev->dm.dc->links[link_index]; + dev = adev->dm.ddev; + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { + aconnector = to_amdgpu_dm_connector(connector); + if (link && aconnector->dc_link == link) { + if (notify->type == DMUB_NOTIFICATION_HPD) + DRM_INFO("DMUB HPD callback: link_index=%u\n", link_index); + else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) + DRM_INFO("DMUB HPD IRQ callback: link_index=%u\n", link_index); + else + DRM_WARN("DMUB Unknown HPD callback type %d, link_index=%u\n", + notify->type, link_index); + + hpd_aconnector = aconnector; + break; + } + } + drm_connector_list_iter_end(&iter); + + if (hpd_aconnector) { + if (notify->type == DMUB_NOTIFICATION_HPD) + handle_hpd_irq_helper(hpd_aconnector); + else if (notify->type == DMUB_NOTIFICATION_HPD_IRQ) + handle_hpd_rx_irq(hpd_aconnector); + } +} + +/** + * register_dmub_notify_callback - Sets callback for DMUB notify + * @adev: amdgpu_device pointer + * @type: Type of dmub notification + * @callback: Dmub interrupt callback function + * @dmub_int_thread_offload: offload indicator + * + * API to register a dmub callback handler for a dmub notification. + * Also sets an indicator for whether callback processing is to be offloaded + * to the dmub interrupt handling thread. + * Return: true if successfully registered, false if there is an existing registration + */ +static bool register_dmub_notify_callback(struct amdgpu_device *adev, + enum dmub_notification_type type, + dmub_notify_interrupt_callback_t callback, + bool dmub_int_thread_offload) +{ + if (callback != NULL && type < ARRAY_SIZE(adev->dm.dmub_thread_offload)) { + adev->dm.dmub_callback[type] = callback; + adev->dm.dmub_thread_offload[type] = dmub_int_thread_offload; + } else + return false; + + return true; +} + +static void dm_handle_hpd_work(struct work_struct *work) +{ + struct dmub_hpd_work *dmub_hpd_wrk; + + dmub_hpd_wrk = container_of(work, struct dmub_hpd_work, handle_hpd_work); + + if (!dmub_hpd_wrk->dmub_notify) { + DRM_ERROR("dmub_hpd_wrk dmub_notify is NULL"); + return; + } + + if (dmub_hpd_wrk->dmub_notify->type < ARRAY_SIZE(dmub_hpd_wrk->adev->dm.dmub_callback)) { + dmub_hpd_wrk->adev->dm.dmub_callback[dmub_hpd_wrk->dmub_notify->type](dmub_hpd_wrk->adev, + dmub_hpd_wrk->dmub_notify); + } + + kfree(dmub_hpd_wrk->dmub_notify); + kfree(dmub_hpd_wrk); + +} + +#define DMUB_TRACE_MAX_READ 64 +/** + * dm_dmub_outbox1_low_irq() - Handles Outbox interrupt + * @interrupt_params: used for determining the Outbox instance + * + * Handles the Outbox interrupt + * event.
+ */ +static void dm_dmub_outbox1_low_irq(void *interrupt_params) +{ + struct dmub_notification notify; + struct common_irq_params *irq_params = interrupt_params; + struct amdgpu_device *adev = irq_params->adev; + struct amdgpu_display_manager *dm = &adev->dm; + struct dmcub_trace_buf_entry entry = { 0 }; + u32 count = 0; + struct dmub_hpd_work *dmub_hpd_wrk; + struct dc_link *plink = NULL; + + if (dc_enable_dmub_notifications(adev->dm.dc) && + irq_params->irq_src == DC_IRQ_SOURCE_DMCUB_OUTBOX) { + + do { + dc_stat_get_dmub_notification(adev->dm.dc, ¬ify); + if (notify.type >= ARRAY_SIZE(dm->dmub_thread_offload)) { + DRM_ERROR("DM: notify type %d invalid!", notify.type); + continue; + } + if (!dm->dmub_callback[notify.type]) { + DRM_DEBUG_DRIVER("DMUB notification skipped, no handler: type=%d\n", notify.type); + continue; + } + if (dm->dmub_thread_offload[notify.type] == true) { + dmub_hpd_wrk = kzalloc(sizeof(*dmub_hpd_wrk), GFP_ATOMIC); + if (!dmub_hpd_wrk) { + DRM_ERROR("Failed to allocate dmub_hpd_wrk"); + return; + } + dmub_hpd_wrk->dmub_notify = kmemdup(¬ify, sizeof(struct dmub_notification), + GFP_ATOMIC); + if (!dmub_hpd_wrk->dmub_notify) { + kfree(dmub_hpd_wrk); + DRM_ERROR("Failed to allocate dmub_hpd_wrk->dmub_notify"); + return; + } + INIT_WORK(&dmub_hpd_wrk->handle_hpd_work, dm_handle_hpd_work); + dmub_hpd_wrk->adev = adev; + if (notify.type == DMUB_NOTIFICATION_HPD) { + plink = adev->dm.dc->links[notify.link_index]; + if (plink) { + plink->hpd_status = + notify.hpd_status == DP_HPD_PLUG; + } + } + queue_work(adev->dm.delayed_hpd_wq, &dmub_hpd_wrk->handle_hpd_work); + } else { + dm->dmub_callback[notify.type](adev, ¬ify); + } + } while (notify.pending_notification); + } + + + do { + if (dc_dmub_srv_get_dmub_outbox0_msg(dm->dc, &entry)) { + trace_amdgpu_dmub_trace_high_irq(entry.trace_code, entry.tick_count, + entry.param0, entry.param1); + + DRM_DEBUG_DRIVER("trace_code:%u, tick_count:%u, param0:%u, param1:%u\n", + entry.trace_code, entry.tick_count, entry.param0, entry.param1); + } else + break; + + count++; + + } while (count <= DMUB_TRACE_MAX_READ); + + if (count > DMUB_TRACE_MAX_READ) + DRM_DEBUG_DRIVER("Warning : count > DMUB_TRACE_MAX_READ"); +} + +static int dm_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int dm_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +/* Prototypes of private functions */ +static int dm_early_init(void *handle); + +/* Allocate memory for FBC compressed data */ +static void amdgpu_dm_fbc_init(struct drm_connector *connector) +{ + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct dm_compressor_info *compressor = &adev->dm.compressor; + struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(connector); + struct drm_display_mode *mode; + unsigned long max_size = 0; + + if (adev->dm.dc->fbc_compressor == NULL) + return; + + if (aconn->dc_link->connector_signal != SIGNAL_TYPE_EDP) + return; + + if (compressor->bo_ptr) + return; + + + list_for_each_entry(mode, &connector->modes, head) { + if (max_size < mode->htotal * mode->vtotal) + max_size = mode->htotal * mode->vtotal; + } + + if (max_size) { + int r = amdgpu_bo_create_kernel(adev, max_size * 4, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, &compressor->bo_ptr, + &compressor->gpu_addr, &compressor->cpu_addr); + + if (r) + DRM_ERROR("DM: Failed to initialize FBC\n"); + else { + adev->dm.dc->ctx->fbc_gpu_addr = compressor->gpu_addr; + DRM_INFO("DM: FBC alloc 
%lu\n", max_size*4); + } + + } + +} + +static int amdgpu_dm_audio_component_get_eld(struct device *kdev, int port, + int pipe, bool *enabled, + unsigned char *buf, int max_bytes) +{ + struct drm_device *dev = dev_get_drvdata(kdev); + struct amdgpu_device *adev = drm_to_adev(dev); + struct drm_connector *connector; + struct drm_connector_list_iter conn_iter; + struct amdgpu_dm_connector *aconnector; + int ret = 0; + + *enabled = false; + + mutex_lock(&adev->dm.audio_lock); + + drm_connector_list_iter_begin(dev, &conn_iter); + drm_for_each_connector_iter(connector, &conn_iter) { + aconnector = to_amdgpu_dm_connector(connector); + if (aconnector->audio_inst != port) + continue; + + *enabled = true; + ret = drm_eld_size(connector->eld); + memcpy(buf, connector->eld, min(max_bytes, ret)); + + break; + } + drm_connector_list_iter_end(&conn_iter); + + mutex_unlock(&adev->dm.audio_lock); + + DRM_DEBUG_KMS("Get ELD : idx=%d ret=%d en=%d\n", port, ret, *enabled); + + return ret; +} + +static const struct drm_audio_component_ops amdgpu_dm_audio_component_ops = { + .get_eld = amdgpu_dm_audio_component_get_eld, +}; + +static int amdgpu_dm_audio_component_bind(struct device *kdev, + struct device *hda_kdev, void *data) +{ + struct drm_device *dev = dev_get_drvdata(kdev); + struct amdgpu_device *adev = drm_to_adev(dev); + struct drm_audio_component *acomp = data; + + acomp->ops = &amdgpu_dm_audio_component_ops; + acomp->dev = kdev; + adev->dm.audio_component = acomp; + + return 0; +} + +static void amdgpu_dm_audio_component_unbind(struct device *kdev, + struct device *hda_kdev, void *data) +{ + struct drm_device *dev = dev_get_drvdata(kdev); + struct amdgpu_device *adev = drm_to_adev(dev); + struct drm_audio_component *acomp = data; + + acomp->ops = NULL; + acomp->dev = NULL; + adev->dm.audio_component = NULL; +} + +static const struct component_ops amdgpu_dm_audio_component_bind_ops = { + .bind = amdgpu_dm_audio_component_bind, + .unbind = amdgpu_dm_audio_component_unbind, +}; + +static int amdgpu_dm_audio_init(struct amdgpu_device *adev) +{ + int i, ret; + + if (!amdgpu_audio) + return 0; + + adev->mode_info.audio.enabled = true; + + adev->mode_info.audio.num_pins = adev->dm.dc->res_pool->audio_count; + + for (i = 0; i < adev->mode_info.audio.num_pins; i++) { + adev->mode_info.audio.pin[i].channels = -1; + adev->mode_info.audio.pin[i].rate = -1; + adev->mode_info.audio.pin[i].bits_per_sample = -1; + adev->mode_info.audio.pin[i].status_bits = 0; + adev->mode_info.audio.pin[i].category_code = 0; + adev->mode_info.audio.pin[i].connected = false; + adev->mode_info.audio.pin[i].id = + adev->dm.dc->res_pool->audios[i]->inst; + adev->mode_info.audio.pin[i].offset = 0; + } + + ret = component_add(adev->dev, &amdgpu_dm_audio_component_bind_ops); + if (ret < 0) + return ret; + + adev->dm.audio_registered = true; + + return 0; +} + +static void amdgpu_dm_audio_fini(struct amdgpu_device *adev) +{ + if (!amdgpu_audio) + return; + + if (!adev->mode_info.audio.enabled) + return; + + if (adev->dm.audio_registered) { + component_del(adev->dev, &amdgpu_dm_audio_component_bind_ops); + adev->dm.audio_registered = false; + } + + /* TODO: Disable audio? 
*/ + + adev->mode_info.audio.enabled = false; +} + +static void amdgpu_dm_audio_eld_notify(struct amdgpu_device *adev, int pin) +{ + struct drm_audio_component *acomp = adev->dm.audio_component; + + if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) { + DRM_DEBUG_KMS("Notify ELD: %d\n", pin); + + acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr, + pin, -1); + } +} + +static int dm_dmub_hw_init(struct amdgpu_device *adev) +{ + const struct dmcub_firmware_header_v1_0 *hdr; + struct dmub_srv *dmub_srv = adev->dm.dmub_srv; + struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; + const struct firmware *dmub_fw = adev->dm.dmub_fw; + struct dmcu *dmcu = adev->dm.dc->res_pool->dmcu; + struct abm *abm = adev->dm.dc->res_pool->abm; + struct dmub_srv_hw_params hw_params; + enum dmub_status status; + const unsigned char *fw_inst_const, *fw_bss_data; + u32 i, fw_inst_const_size, fw_bss_data_size; + bool has_hw_support; + + if (!dmub_srv) + /* DMUB isn't supported on the ASIC. */ + return 0; + + if (!fb_info) { + DRM_ERROR("No framebuffer info for DMUB service.\n"); + return -EINVAL; + } + + if (!dmub_fw) { + /* Firmware required for DMUB support. */ + DRM_ERROR("No firmware provided for DMUB.\n"); + return -EINVAL; + } + + status = dmub_srv_has_hw_support(dmub_srv, &has_hw_support); + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error checking HW support for DMUB: %d\n", status); + return -EINVAL; + } + + if (!has_hw_support) { + DRM_INFO("DMUB unsupported on ASIC\n"); + return 0; + } + + /* Reset DMCUB if it was previously running - before we overwrite its memory. */ + status = dmub_srv_hw_reset(dmub_srv); + if (status != DMUB_STATUS_OK) + DRM_WARN("Error resetting DMUB HW: %d\n", status); + + hdr = (const struct dmcub_firmware_header_v1_0 *)dmub_fw->data; + + fw_inst_const = dmub_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + + PSP_HEADER_BYTES; + + fw_bss_data = dmub_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + + le32_to_cpu(hdr->inst_const_bytes); + + /* Copy firmware and bios info into FB memory. */ + fw_inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - + PSP_HEADER_BYTES - PSP_FOOTER_BYTES; + + fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes); + + /* if adev->firmware.load_type == AMDGPU_FW_LOAD_PSP, + * amdgpu_ucode_init_single_fw will load dmub firmware + * fw_inst_const part to cw0; otherwise, the firmware back door load + * will be done by dm_dmub_hw_init + */ + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const, + fw_inst_const_size); + } + + if (fw_bss_data_size) + memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, + fw_bss_data, fw_bss_data_size); + + /* Copy firmware bios info into FB memory. */ + memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios, + adev->bios_size); + + /* Reset regions that need to be reset. */ + memset(fb_info->fb[DMUB_WINDOW_4_MAILBOX].cpu_addr, 0, + fb_info->fb[DMUB_WINDOW_4_MAILBOX].size); + + memset(fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr, 0, + fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size); + + memset(fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr, 0, + fb_info->fb[DMUB_WINDOW_6_FW_STATE].size); + + /* Initialize hardware. 
*/ + memset(&hw_params, 0, sizeof(hw_params)); + hw_params.fb_base = adev->gmc.fb_start; + hw_params.fb_offset = adev->vm_manager.vram_base_offset; + + /* backdoor load firmware and trigger dmub running */ + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) + hw_params.load_inst_const = true; + + if (dmcu) + hw_params.psp_version = dmcu->psp_version; + + for (i = 0; i < fb_info->num_fb; ++i) + hw_params.fb[i] = &fb_info->fb[i]; + + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(3, 1, 3): + case IP_VERSION(3, 1, 4): + hw_params.dpia_supported = true; + hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; + break; + default: + break; + } + + status = dmub_srv_hw_init(dmub_srv, &hw_params); + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error initializing DMUB HW: %d\n", status); + return -EINVAL; + } + + /* Wait for firmware load to finish. */ + status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); + if (status != DMUB_STATUS_OK) + DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); + + /* Init DMCU and ABM if available. */ + if (dmcu && abm) { + dmcu->funcs->dmcu_init(dmcu); + abm->dmcu_is_running = dmcu->funcs->is_dmcu_initialized(dmcu); + } + + if (!adev->dm.dc->ctx->dmub_srv) + adev->dm.dc->ctx->dmub_srv = dc_dmub_srv_create(adev->dm.dc, dmub_srv); + if (!adev->dm.dc->ctx->dmub_srv) { + DRM_ERROR("Couldn't allocate DC DMUB server!\n"); + return -ENOMEM; + } + + DRM_INFO("DMUB hardware initialized: version=0x%08X\n", + adev->dm.dmcub_fw_version); + + return 0; +} + +static void dm_dmub_hw_resume(struct amdgpu_device *adev) +{ + struct dmub_srv *dmub_srv = adev->dm.dmub_srv; + enum dmub_status status; + bool init; + + if (!dmub_srv) { + /* DMUB isn't supported on the ASIC. */ + return; + } + + status = dmub_srv_is_hw_init(dmub_srv, &init); + if (status != DMUB_STATUS_OK) + DRM_WARN("DMUB hardware init check failed: %d\n", status); + + if (status == DMUB_STATUS_OK && init) { + /* Wait for firmware load to finish. */ + status = dmub_srv_wait_for_auto_load(dmub_srv, 100000); + if (status != DMUB_STATUS_OK) + DRM_WARN("Wait for DMUB auto-load failed: %d\n", status); + } else { + /* Perform the full hardware initialization. */ + dm_dmub_hw_init(adev); + } +} + +static void mmhub_read_system_context(struct amdgpu_device *adev, struct dc_phy_addr_space_config *pa_config) +{ + u64 pt_base; + u32 logical_addr_low; + u32 logical_addr_high; + u32 agp_base, agp_bot, agp_top; + PHYSICAL_ADDRESS_LOC page_table_start, page_table_end, page_table_base; + + memset(pa_config, 0, sizeof(*pa_config)); + + agp_base = 0; + agp_bot = adev->gmc.agp_start >> 24; + agp_top = adev->gmc.agp_end >> 24; + + /* AGP aperture is disabled */ + if (agp_bot == agp_top) { + logical_addr_low = adev->gmc.fb_start >> 18; + if (adev->apu_flags & AMD_APU_IS_RAVEN2) + /* + * Raven2 has a HW issue that it is unable to use the vram which + * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the + * workaround that increase system aperture high address (add 1) + * to get rid of the VM fault and hardware hang. + */ + logical_addr_high = (adev->gmc.fb_end >> 18) + 0x1; + else + logical_addr_high = adev->gmc.fb_end >> 18; + } else { + logical_addr_low = min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18; + if (adev->apu_flags & AMD_APU_IS_RAVEN2) + /* + * Raven2 has a HW issue that it is unable to use the vram which + * is out of MC_VM_SYSTEM_APERTURE_HIGH_ADDR. So here is the + * workaround that increase system aperture high address (add 1) + * to get rid of the VM fault and hardware hang. 
+ */ + logical_addr_high = max((adev->gmc.fb_end >> 18) + 0x1, adev->gmc.agp_end >> 18); + else + logical_addr_high = max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18; + } + + pt_base = amdgpu_gmc_pd_addr(adev->gart.bo); + + page_table_start.high_part = upper_32_bits(adev->gmc.gart_start >> + AMDGPU_GPU_PAGE_SHIFT); + page_table_start.low_part = lower_32_bits(adev->gmc.gart_start >> + AMDGPU_GPU_PAGE_SHIFT); + page_table_end.high_part = upper_32_bits(adev->gmc.gart_end >> + AMDGPU_GPU_PAGE_SHIFT); + page_table_end.low_part = lower_32_bits(adev->gmc.gart_end >> + AMDGPU_GPU_PAGE_SHIFT); + page_table_base.high_part = upper_32_bits(pt_base); + page_table_base.low_part = lower_32_bits(pt_base); + + pa_config->system_aperture.start_addr = (uint64_t)logical_addr_low << 18; + pa_config->system_aperture.end_addr = (uint64_t)logical_addr_high << 18; + + pa_config->system_aperture.agp_base = (uint64_t)agp_base << 24; + pa_config->system_aperture.agp_bot = (uint64_t)agp_bot << 24; + pa_config->system_aperture.agp_top = (uint64_t)agp_top << 24; + + pa_config->system_aperture.fb_base = adev->gmc.fb_start; + pa_config->system_aperture.fb_offset = adev->vm_manager.vram_base_offset; + pa_config->system_aperture.fb_top = adev->gmc.fb_end; + + pa_config->gart_config.page_table_start_addr = page_table_start.quad_part << 12; + pa_config->gart_config.page_table_end_addr = page_table_end.quad_part << 12; + pa_config->gart_config.page_table_base_addr = page_table_base.quad_part; + + pa_config->is_hvm_enabled = adev->mode_info.gpu_vm_support; + +} + +static void force_connector_state( + struct amdgpu_dm_connector *aconnector, + enum drm_connector_force force_state) +{ + struct drm_connector *connector = &aconnector->base; + + mutex_lock(&connector->dev->mode_config.mutex); + aconnector->base.force = force_state; + mutex_unlock(&connector->dev->mode_config.mutex); + + mutex_lock(&aconnector->hpd_lock); + drm_kms_helper_connector_hotplug_event(connector); + mutex_unlock(&aconnector->hpd_lock); +} + +static void dm_handle_hpd_rx_offload_work(struct work_struct *work) +{ + struct hpd_rx_irq_offload_work *offload_work; + struct amdgpu_dm_connector *aconnector; + struct dc_link *dc_link; + struct amdgpu_device *adev; + enum dc_connection_type new_connection_type = dc_connection_none; + unsigned long flags; + union test_response test_response; + + memset(&test_response, 0, sizeof(test_response)); + + offload_work = container_of(work, struct hpd_rx_irq_offload_work, work); + aconnector = offload_work->offload_wq->aconnector; + + if (!aconnector) { + DRM_ERROR("Can't retrieve aconnector in hpd_rx_irq_offload_work"); + goto skip; + } + + adev = drm_to_adev(aconnector->base.dev); + dc_link = aconnector->dc_link; + + mutex_lock(&aconnector->hpd_lock); + if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) + DRM_ERROR("KMS: Failed to detect connector\n"); + mutex_unlock(&aconnector->hpd_lock); + + if (new_connection_type == dc_connection_none) + goto skip; + + if (amdgpu_in_reset(adev)) + goto skip; + + if (offload_work->data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || + offload_work->data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { + dm_handle_mst_sideband_msg_ready_event(&aconnector->mst_mgr, DOWN_OR_UP_MSG_RDY_EVENT); + spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); + offload_work->offload_wq->is_handling_mst_msg_rdy_event = false; + spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); + goto skip; + } + + mutex_lock(&adev->dm.dc_lock); + if 
(offload_work->data.bytes.device_service_irq.bits.AUTOMATED_TEST) { + dc_link_dp_handle_automated_test(dc_link); + + if (aconnector->timing_changed) { + /* force connector disconnect and reconnect */ + force_connector_state(aconnector, DRM_FORCE_OFF); + msleep(100); + force_connector_state(aconnector, DRM_FORCE_UNSPECIFIED); + } + + test_response.bits.ACK = 1; + + core_link_write_dpcd( + dc_link, + DP_TEST_RESPONSE, + &test_response.raw, + sizeof(test_response)); + } else if ((dc_link->connector_signal != SIGNAL_TYPE_EDP) && + dc_link_check_link_loss_status(dc_link, &offload_work->data) && + dc_link_dp_allow_hpd_rx_irq(dc_link)) { + /* offload_work->data is from handle_hpd_rx_irq-> + * schedule_hpd_rx_offload_work.this is defer handle + * for hpd short pulse. upon here, link status may be + * changed, need get latest link status from dpcd + * registers. if link status is good, skip run link + * training again. + */ + union hpd_irq_data irq_data; + + memset(&irq_data, 0, sizeof(irq_data)); + + /* before dc_link_dp_handle_link_loss, allow new link lost handle + * request be added to work queue if link lost at end of dc_link_ + * dp_handle_link_loss + */ + spin_lock_irqsave(&offload_work->offload_wq->offload_lock, flags); + offload_work->offload_wq->is_handling_link_loss = false; + spin_unlock_irqrestore(&offload_work->offload_wq->offload_lock, flags); + + if ((dc_link_dp_read_hpd_rx_irq_data(dc_link, &irq_data) == DC_OK) && + dc_link_check_link_loss_status(dc_link, &irq_data)) + dc_link_dp_handle_link_loss(dc_link); + } + mutex_unlock(&adev->dm.dc_lock); + +skip: + kfree(offload_work); + +} + +static struct hpd_rx_irq_offload_work_queue *hpd_rx_irq_create_workqueue(struct dc *dc) +{ + int max_caps = dc->caps.max_links; + int i = 0; + struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq = NULL; + + hpd_rx_offload_wq = kcalloc(max_caps, sizeof(*hpd_rx_offload_wq), GFP_KERNEL); + + if (!hpd_rx_offload_wq) + return NULL; + + + for (i = 0; i < max_caps; i++) { + hpd_rx_offload_wq[i].wq = + create_singlethread_workqueue("amdgpu_dm_hpd_rx_offload_wq"); + + if (hpd_rx_offload_wq[i].wq == NULL) { + DRM_ERROR("create amdgpu_dm_hpd_rx_offload_wq fail!"); + goto out_err; + } + + spin_lock_init(&hpd_rx_offload_wq[i].offload_lock); + } + + return hpd_rx_offload_wq; + +out_err: + for (i = 0; i < max_caps; i++) { + if (hpd_rx_offload_wq[i].wq) + destroy_workqueue(hpd_rx_offload_wq[i].wq); + } + kfree(hpd_rx_offload_wq); + return NULL; +} + +struct amdgpu_stutter_quirk { + u16 chip_vendor; + u16 chip_device; + u16 subsys_vendor; + u16 subsys_device; + u8 revision; +}; + +static const struct amdgpu_stutter_quirk amdgpu_stutter_quirk_list[] = { + /* https://bugzilla.kernel.org/show_bug.cgi?id=214417 */ + { 0x1002, 0x15dd, 0x1002, 0x15dd, 0xc8 }, + { 0, 0, 0, 0, 0 }, +}; + +static bool dm_should_disable_stutter(struct pci_dev *pdev) +{ + const struct amdgpu_stutter_quirk *p = amdgpu_stutter_quirk_list; + + while (p && p->chip_device != 0) { + if (pdev->vendor == p->chip_vendor && + pdev->device == p->chip_device && + pdev->subsystem_vendor == p->subsys_vendor && + pdev->subsystem_device == p->subsys_device && + pdev->revision == p->revision) { + return true; + } + ++p; + } + return false; +} + +static const struct dmi_system_id hpd_disconnect_quirk_table[] = { + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3660"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3260"), + }, + }, + { + 
.matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "Precision 3460"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower Plus 7010"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Tower 7010"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF Plus 7010"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex SFF 7010"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro Plus 7010"), + }, + }, + { + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex Micro 7010"), + }, + }, + {} + /* TODO: refactor this from a fixed table to a dynamic option */ +}; + +static void retrieve_dmi_info(struct amdgpu_display_manager *dm) +{ + const struct dmi_system_id *dmi_id; + + dm->aux_hpd_discon_quirk = false; + + dmi_id = dmi_first_match(hpd_disconnect_quirk_table); + if (dmi_id) { + dm->aux_hpd_discon_quirk = true; + DRM_INFO("aux_hpd_discon_quirk attached\n"); + } +} + +static int amdgpu_dm_init(struct amdgpu_device *adev) +{ + struct dc_init_data init_data; + struct dc_callback_init init_params; + int r; + + adev->dm.ddev = adev_to_drm(adev); + adev->dm.adev = adev; + + /* Zero all the fields */ + memset(&init_data, 0, sizeof(init_data)); + memset(&init_params, 0, sizeof(init_params)); + + mutex_init(&adev->dm.dpia_aux_lock); + mutex_init(&adev->dm.dc_lock); + mutex_init(&adev->dm.audio_lock); + + if (amdgpu_dm_irq_init(adev)) { + DRM_ERROR("amdgpu: failed to initialize DM IRQ support.\n"); + goto error; + } + + init_data.asic_id.chip_family = adev->family; + + init_data.asic_id.pci_revision_id = adev->pdev->revision; + init_data.asic_id.hw_internal_rev = adev->external_rev_id; + init_data.asic_id.chip_id = adev->pdev->device; + + init_data.asic_id.vram_width = adev->gmc.vram_width; + /* TODO: initialize init_data.asic_id.vram_type here!!!! 
*/ + init_data.asic_id.atombios_base_address = + adev->mode_info.atom_context->bios; + + init_data.driver = adev; + + adev->dm.cgs_device = amdgpu_cgs_create_device(adev); + + if (!adev->dm.cgs_device) { + DRM_ERROR("amdgpu: failed to create cgs device.\n"); + goto error; + } + + init_data.cgs_device = adev->dm.cgs_device; + + init_data.dce_environment = DCE_ENV_PRODUCTION_DRV; + + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(2, 1, 0): + switch (adev->dm.dmcub_fw_version) { + case 0: /* development */ + case 0x1: /* linux-firmware.git hash 6d9f399 */ + case 0x01000000: /* linux-firmware.git hash 9a0b0f4 */ + init_data.flags.disable_dmcu = false; + break; + default: + init_data.flags.disable_dmcu = true; + } + break; + case IP_VERSION(2, 0, 3): + init_data.flags.disable_dmcu = true; + break; + default: + break; + } + + switch (adev->asic_type) { + case CHIP_CARRIZO: + case CHIP_STONEY: + init_data.flags.gpu_vm_support = true; + break; + default: + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): + /* enable S/G on PCO and RV2 */ + if ((adev->apu_flags & AMD_APU_IS_RAVEN2) || + (adev->apu_flags & AMD_APU_IS_PICASSO)) + init_data.flags.gpu_vm_support = true; + break; + case IP_VERSION(2, 1, 0): + case IP_VERSION(3, 0, 1): + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 1, 5): + case IP_VERSION(3, 1, 6): + init_data.flags.gpu_vm_support = true; + break; + default: + break; + } + break; + } + if (init_data.flags.gpu_vm_support && + (amdgpu_sg_display == 0)) + init_data.flags.gpu_vm_support = false; + + if (init_data.flags.gpu_vm_support) + adev->mode_info.gpu_vm_support = true; + + if (amdgpu_dc_feature_mask & DC_FBC_MASK) + init_data.flags.fbc_support = true; + + if (amdgpu_dc_feature_mask & DC_MULTI_MON_PP_MCLK_SWITCH_MASK) + init_data.flags.multi_mon_pp_mclk_switch = true; + + if (amdgpu_dc_feature_mask & DC_DISABLE_FRACTIONAL_PWM_MASK) + init_data.flags.disable_fractional_pwm = true; + + if (amdgpu_dc_feature_mask & DC_EDP_NO_POWER_SEQUENCING) + init_data.flags.edp_no_power_sequencing = true; + + if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP1_4A) + init_data.flags.allow_lttpr_non_transparent_mode.bits.DP1_4A = true; + if (amdgpu_dc_feature_mask & DC_DISABLE_LTTPR_DP2_0) + init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true; + + init_data.flags.seamless_boot_edp_requested = false; + + if (check_seamless_boot_capability(adev)) { + init_data.flags.seamless_boot_edp_requested = true; + init_data.flags.allow_seamless_boot_optimization = true; + DRM_INFO("Seamless boot condition check passed\n"); + } + + init_data.flags.enable_mipi_converter_optimization = true; + + init_data.dcn_reg_offsets = adev->reg_offset[DCE_HWIP][0]; + init_data.nbio_reg_offsets = adev->reg_offset[NBIO_HWIP][0]; + + INIT_LIST_HEAD(&adev->dm.da_list); + + retrieve_dmi_info(&adev->dm); + + /* Display Core create. 
*/ + adev->dm.dc = dc_create(&init_data); + + if (adev->dm.dc) { + DRM_INFO("Display Core v%s initialized on %s\n", DC_VER, + dce_version_to_string(adev->dm.dc->ctx->dce_version)); + } else { + DRM_INFO("Display Core failed to initialize with v%s!\n", DC_VER); + goto error; + } + + if (amdgpu_dc_debug_mask & DC_DISABLE_PIPE_SPLIT) { + adev->dm.dc->debug.force_single_disp_pipe_split = false; + adev->dm.dc->debug.pipe_split_policy = MPC_SPLIT_AVOID; + } + + if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY) + adev->dm.dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true; + if (dm_should_disable_stutter(adev->pdev)) + adev->dm.dc->debug.disable_stutter = true; + + if (amdgpu_dc_debug_mask & DC_DISABLE_STUTTER) + adev->dm.dc->debug.disable_stutter = true; + + if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) + adev->dm.dc->debug.disable_dsc = true; + + if (amdgpu_dc_debug_mask & DC_DISABLE_CLOCK_GATING) + adev->dm.dc->debug.disable_clock_gate = true; + + if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH) + adev->dm.dc->debug.force_subvp_mclk_switch = true; + + adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm; + + /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */ + adev->dm.dc->debug.ignore_cable_id = true; + + /* TODO: There is a new drm mst change where the freedom of + * vc_next_start_slot update is revoked/moved into drm, instead of in + * driver. This forces us to make sure to get vc_next_start_slot updated + * in drm function each time without considering if mst_state is active + * or not. Otherwise, next time hotplug will give wrong start_slot + * number. We are implementing a temporary solution to even notify drm + * mst deallocation when link is no longer of MST type when uncommitting + * the stream so we will have more time to work on a proper solution. + * Ideally when dm_helpers_dp_mst_stop_top_mgr message is triggered, we + * should notify drm to do a complete "reset" of its states and stop + * calling further drm mst functions when link is no longer of an MST + * type. This could happen when we unplug an MST hubs/displays. When + * uncommit stream comes later after unplug, we should just reset + * hardware states only. 
+ */ + adev->dm.dc->debug.temp_mst_deallocation_sequence = true; + + if (adev->dm.dc->caps.dp_hdmi21_pcon_support) + DRM_INFO("DP-HDMI FRL PCON supported\n"); + + r = dm_dmub_hw_init(adev); + if (r) { + DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r); + goto error; + } + + dc_hardware_init(adev->dm.dc); + + adev->dm.hpd_rx_offload_wq = hpd_rx_irq_create_workqueue(adev->dm.dc); + if (!adev->dm.hpd_rx_offload_wq) { + DRM_ERROR("amdgpu: failed to create hpd rx offload workqueue.\n"); + goto error; + } + + if ((adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_CARRIZO)) { + struct dc_phy_addr_space_config pa_config; + + mmhub_read_system_context(adev, &pa_config); + + // Call the DC init_memory func + dc_setup_system_context(adev->dm.dc, &pa_config); + } + + adev->dm.freesync_module = mod_freesync_create(adev->dm.dc); + if (!adev->dm.freesync_module) { + DRM_ERROR( + "amdgpu: failed to initialize freesync_module.\n"); + } else + DRM_DEBUG_DRIVER("amdgpu: freesync_module init done %p.\n", + adev->dm.freesync_module); + + amdgpu_dm_init_color_mod(); + + if (adev->dm.dc->caps.max_links > 0) { + adev->dm.vblank_control_workqueue = + create_singlethread_workqueue("dm_vblank_control_workqueue"); + if (!adev->dm.vblank_control_workqueue) + DRM_ERROR("amdgpu: failed to initialize vblank_workqueue.\n"); + } + + if (adev->dm.dc->caps.max_links > 0 && adev->family >= AMDGPU_FAMILY_RV) { + adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc); + + if (!adev->dm.hdcp_workqueue) + DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n"); + else + DRM_DEBUG_DRIVER("amdgpu: hdcp_workqueue init done %p.\n", adev->dm.hdcp_workqueue); + + dc_init_callbacks(adev->dm.dc, &init_params); + } + if (dc_is_dmub_outbox_supported(adev->dm.dc)) { + init_completion(&adev->dm.dmub_aux_transfer_done); + adev->dm.dmub_notify = kzalloc(sizeof(struct dmub_notification), GFP_KERNEL); + if (!adev->dm.dmub_notify) { + DRM_INFO("amdgpu: fail to allocate adev->dm.dmub_notify"); + goto error; + } + + adev->dm.delayed_hpd_wq = create_singlethread_workqueue("amdgpu_dm_hpd_wq"); + if (!adev->dm.delayed_hpd_wq) { + DRM_ERROR("amdgpu: failed to create hpd offload workqueue.\n"); + goto error; + } + + amdgpu_dm_outbox_init(adev); + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_AUX_REPLY, + dmub_aux_setconfig_callback, false)) { + DRM_ERROR("amdgpu: fail to register dmub aux callback"); + goto error; + } + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD, dmub_hpd_callback, true)) { + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + goto error; + } + if (!register_dmub_notify_callback(adev, DMUB_NOTIFICATION_HPD_IRQ, dmub_hpd_callback, true)) { + DRM_ERROR("amdgpu: fail to register dmub hpd callback"); + goto error; + } + } + + /* Enable outbox notification only after IRQ handlers are registered and DMUB is alive. + * It is expected that DMUB will resend any pending notifications at this point, for + * example HPD from DPIA. + */ + if (dc_is_dmub_outbox_supported(adev->dm.dc)) { + dc_enable_dmub_outbox(adev->dm.dc); + + /* DPIA trace goes to dmesg logs only if outbox is enabled */ + if (amdgpu_dc_debug_mask & DC_ENABLE_DPIA_TRACE) + dc_dmub_srv_enable_dpia_trace(adev->dm.dc); + } + + if (amdgpu_dm_initialize_drm_device(adev)) { + DRM_ERROR( + "amdgpu: failed to initialize sw for display support.\n"); + goto error; + } + + /* create fake encoders for MST */ + dm_dp_create_fake_mst_encoders(adev); + + /* TODO: Add_display_info? 
 */
+
+	/* TODO use dynamic cursor width */
+	adev_to_drm(adev)->mode_config.cursor_width = adev->dm.dc->caps.max_cursor_size;
+	adev_to_drm(adev)->mode_config.cursor_height = adev->dm.dc->caps.max_cursor_size;
+
+	if (drm_vblank_init(adev_to_drm(adev), adev->dm.display_indexes_num)) {
+		DRM_ERROR(
+		"amdgpu: failed to initialize vblank for display support.\n");
+		goto error;
+	}
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+	adev->dm.secure_display_ctxs = amdgpu_dm_crtc_secure_display_create_contexts(adev);
+	if (!adev->dm.secure_display_ctxs)
+		DRM_ERROR("amdgpu: failed to initialize secure display contexts.\n");
+#endif
+
+	DRM_DEBUG_DRIVER("KMS initialized.\n");
+
+	return 0;
+error:
+	amdgpu_dm_fini(adev);
+
+	return -EINVAL;
+}
+
+static int amdgpu_dm_early_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_dm_audio_fini(adev);
+
+	return 0;
+}
+
+static void amdgpu_dm_fini(struct amdgpu_device *adev)
+{
+	int i;
+
+	if (adev->dm.vblank_control_workqueue) {
+		destroy_workqueue(adev->dm.vblank_control_workqueue);
+		adev->dm.vblank_control_workqueue = NULL;
+	}
+
+	amdgpu_dm_destroy_drm_device(&adev->dm);
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+	if (adev->dm.secure_display_ctxs) {
+		for (i = 0; i < adev->mode_info.num_crtc; i++) {
+			if (adev->dm.secure_display_ctxs[i].crtc) {
+				flush_work(&adev->dm.secure_display_ctxs[i].notify_ta_work);
+				flush_work(&adev->dm.secure_display_ctxs[i].forward_roi_work);
+			}
+		}
+		kfree(adev->dm.secure_display_ctxs);
+		adev->dm.secure_display_ctxs = NULL;
+	}
+#endif
+	if (adev->dm.hdcp_workqueue) {
+		hdcp_destroy(&adev->dev->kobj, adev->dm.hdcp_workqueue);
+		adev->dm.hdcp_workqueue = NULL;
+	}
+
+	if (adev->dm.dc)
+		dc_deinit_callbacks(adev->dm.dc);
+
+	if (adev->dm.dc)
+		dc_dmub_srv_destroy(&adev->dm.dc->ctx->dmub_srv);
+
+	if (dc_enable_dmub_notifications(adev->dm.dc)) {
+		kfree(adev->dm.dmub_notify);
+		adev->dm.dmub_notify = NULL;
+		destroy_workqueue(adev->dm.delayed_hpd_wq);
+		adev->dm.delayed_hpd_wq = NULL;
+	}
+
+	if (adev->dm.dmub_bo)
+		amdgpu_bo_free_kernel(&adev->dm.dmub_bo,
+				      &adev->dm.dmub_bo_gpu_addr,
+				      &adev->dm.dmub_bo_cpu_addr);
+
+	if (adev->dm.hpd_rx_offload_wq) {
+		for (i = 0; i < adev->dm.dc->caps.max_links; i++) {
+			if (adev->dm.hpd_rx_offload_wq[i].wq) {
+				destroy_workqueue(adev->dm.hpd_rx_offload_wq[i].wq);
+				adev->dm.hpd_rx_offload_wq[i].wq = NULL;
+			}
+		}
+
+		kfree(adev->dm.hpd_rx_offload_wq);
+		adev->dm.hpd_rx_offload_wq = NULL;
+	}
+
+	/* DC Destroy TODO: Replace destroy DAL */
+	if (adev->dm.dc)
+		dc_destroy(&adev->dm.dc);
+	/*
+	 * TODO: pageflip, vblank interrupt
+	 *
+	 * amdgpu_dm_irq_fini(adev);
+	 */
+
+	if (adev->dm.cgs_device) {
+		amdgpu_cgs_destroy_device(adev->dm.cgs_device);
+		adev->dm.cgs_device = NULL;
+	}
+	if (adev->dm.freesync_module) {
+		mod_freesync_destroy(adev->dm.freesync_module);
+		adev->dm.freesync_module = NULL;
+	}
+
+	mutex_destroy(&adev->dm.audio_lock);
+	mutex_destroy(&adev->dm.dc_lock);
+	mutex_destroy(&adev->dm.dpia_aux_lock);
+}
+
+static int load_dmcu_fw(struct amdgpu_device *adev)
+{
+	const char *fw_name_dmcu = NULL;
+	int r;
+	const struct dmcu_firmware_header_v1_0 *hdr;
+
+	switch (adev->asic_type) {
+#if defined(CONFIG_DRM_AMD_DC_SI)
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+	case CHIP_OLAND:
+#endif
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+	case CHIP_KAVERI:
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+	case CHIP_TONGA:
+	case CHIP_FIJI:
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS10:
+	case
CHIP_POLARIS12: + case CHIP_VEGAM: + case CHIP_VEGA10: + case CHIP_VEGA12: + case CHIP_VEGA20: + return 0; + case CHIP_NAVI12: + fw_name_dmcu = FIRMWARE_NAVI12_DMCU; + break; + case CHIP_RAVEN: + if (ASICREV_IS_PICASSO(adev->external_rev_id)) + fw_name_dmcu = FIRMWARE_RAVEN_DMCU; + else if (ASICREV_IS_RAVEN2(adev->external_rev_id)) + fw_name_dmcu = FIRMWARE_RAVEN_DMCU; + else + return 0; + break; + default: + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(2, 0, 2): + case IP_VERSION(2, 0, 3): + case IP_VERSION(2, 0, 0): + case IP_VERSION(2, 1, 0): + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 3): + case IP_VERSION(3, 0, 1): + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 1, 5): + case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): + return 0; + default: + break; + } + DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type); + return -EINVAL; + } + + if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) { + DRM_DEBUG_KMS("dm: DMCU firmware not supported on direct or SMU loading\n"); + return 0; + } + + r = amdgpu_ucode_request(adev, &adev->dm.fw_dmcu, fw_name_dmcu); + if (r == -ENODEV) { + /* DMCU firmware is not necessary, so don't raise a fuss if it's missing */ + DRM_DEBUG_KMS("dm: DMCU firmware not found\n"); + adev->dm.fw_dmcu = NULL; + return 0; + } + if (r) { + dev_err(adev->dev, "amdgpu_dm: Can't validate firmware \"%s\"\n", + fw_name_dmcu); + amdgpu_ucode_release(&adev->dm.fw_dmcu); + return r; + } + + hdr = (const struct dmcu_firmware_header_v1_0 *)adev->dm.fw_dmcu->data; + adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].ucode_id = AMDGPU_UCODE_ID_DMCU_ERAM; + adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_ERAM].fw = adev->dm.fw_dmcu; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(hdr->header.ucode_size_bytes) - le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); + + adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].ucode_id = AMDGPU_UCODE_ID_DMCU_INTV; + adev->firmware.ucode[AMDGPU_UCODE_ID_DMCU_INTV].fw = adev->dm.fw_dmcu; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(hdr->intv_size_bytes), PAGE_SIZE); + + adev->dm.dmcu_fw_version = le32_to_cpu(hdr->header.ucode_version); + + DRM_DEBUG_KMS("PSP loading DMCU firmware\n"); + + return 0; +} + +static uint32_t amdgpu_dm_dmub_reg_read(void *ctx, uint32_t address) +{ + struct amdgpu_device *adev = ctx; + + return dm_read_reg(adev->dm.dc->ctx, address); +} + +static void amdgpu_dm_dmub_reg_write(void *ctx, uint32_t address, + uint32_t value) +{ + struct amdgpu_device *adev = ctx; + + return dm_write_reg(adev->dm.dc->ctx, address, value); +} + +static int dm_dmub_sw_init(struct amdgpu_device *adev) +{ + struct dmub_srv_create_params create_params; + struct dmub_srv_region_params region_params; + struct dmub_srv_region_info region_info; + struct dmub_srv_memory_params memory_params; + struct dmub_srv_fb_info *fb_info; + struct dmub_srv *dmub_srv; + const struct dmcub_firmware_header_v1_0 *hdr; + enum dmub_asic dmub_asic; + enum dmub_status status; + int r; + + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(2, 1, 0): + dmub_asic = DMUB_ASIC_DCN21; + break; + case IP_VERSION(3, 0, 0): + dmub_asic = DMUB_ASIC_DCN30; + break; + case IP_VERSION(3, 0, 1): + dmub_asic = DMUB_ASIC_DCN301; + break; + case IP_VERSION(3, 0, 2): + dmub_asic = DMUB_ASIC_DCN302; + break; + case IP_VERSION(3, 0, 3): + dmub_asic = DMUB_ASIC_DCN303; + break; + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + dmub_asic = 
(adev->external_rev_id == YELLOW_CARP_B0) ? DMUB_ASIC_DCN31B : DMUB_ASIC_DCN31; + break; + case IP_VERSION(3, 1, 4): + dmub_asic = DMUB_ASIC_DCN314; + break; + case IP_VERSION(3, 1, 5): + dmub_asic = DMUB_ASIC_DCN315; + break; + case IP_VERSION(3, 1, 6): + dmub_asic = DMUB_ASIC_DCN316; + break; + case IP_VERSION(3, 2, 0): + dmub_asic = DMUB_ASIC_DCN32; + break; + case IP_VERSION(3, 2, 1): + dmub_asic = DMUB_ASIC_DCN321; + break; + default: + /* ASIC doesn't support DMUB. */ + return 0; + } + + hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data; + adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version); + + if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) { + adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id = + AMDGPU_UCODE_ID_DMCUB; + adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = + adev->dm.dmub_fw; + adev->firmware.fw_size += + ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE); + + DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n", + adev->dm.dmcub_fw_version); + } + + + adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL); + dmub_srv = adev->dm.dmub_srv; + + if (!dmub_srv) { + DRM_ERROR("Failed to allocate DMUB service!\n"); + return -ENOMEM; + } + + memset(&create_params, 0, sizeof(create_params)); + create_params.user_ctx = adev; + create_params.funcs.reg_read = amdgpu_dm_dmub_reg_read; + create_params.funcs.reg_write = amdgpu_dm_dmub_reg_write; + create_params.asic = dmub_asic; + + /* Create the DMUB service. */ + status = dmub_srv_create(dmub_srv, &create_params); + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error creating DMUB service: %d\n", status); + return -EINVAL; + } + + /* Calculate the size of all the regions for the DMUB service. */ + memset(®ion_params, 0, sizeof(region_params)); + + region_params.inst_const_size = le32_to_cpu(hdr->inst_const_bytes) - + PSP_HEADER_BYTES - PSP_FOOTER_BYTES; + region_params.bss_data_size = le32_to_cpu(hdr->bss_data_bytes); + region_params.vbios_size = adev->bios_size; + region_params.fw_bss_data = region_params.bss_data_size ? + adev->dm.dmub_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + + le32_to_cpu(hdr->inst_const_bytes) : NULL; + region_params.fw_inst_const = + adev->dm.dmub_fw->data + + le32_to_cpu(hdr->header.ucode_array_offset_bytes) + + PSP_HEADER_BYTES; + region_params.is_mailbox_in_inbox = false; + + status = dmub_srv_calc_region_info(dmub_srv, ®ion_params, + ®ion_info); + + if (status != DMUB_STATUS_OK) { + DRM_ERROR("Error calculating DMUB region info: %d\n", status); + return -EINVAL; + } + + /* + * Allocate a framebuffer based on the total size of all the regions. + * TODO: Move this into GART. + */ + r = amdgpu_bo_create_kernel(adev, region_info.fb_size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM | + AMDGPU_GEM_DOMAIN_GTT, + &adev->dm.dmub_bo, + &adev->dm.dmub_bo_gpu_addr, + &adev->dm.dmub_bo_cpu_addr); + if (r) + return r; + + /* Rebase the regions on the framebuffer address. 
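+	 * The region info computed above holds only sizes and relative
+	 * offsets; dmub_srv_calc_mem_info() below resolves them into
+	 * absolute CPU and GPU addresses within the buffer object that
+	 * was just allocated.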
	 */
+	memset(&memory_params, 0, sizeof(memory_params));
+	memory_params.cpu_fb_addr = adev->dm.dmub_bo_cpu_addr;
+	memory_params.gpu_fb_addr = adev->dm.dmub_bo_gpu_addr;
+	memory_params.region_info = &region_info;
+
+	adev->dm.dmub_fb_info =
+		kzalloc(sizeof(*adev->dm.dmub_fb_info), GFP_KERNEL);
+	fb_info = adev->dm.dmub_fb_info;
+
+	if (!fb_info) {
+		DRM_ERROR(
+			"Failed to allocate framebuffer info for DMUB service!\n");
+		return -ENOMEM;
+	}
+
+	status = dmub_srv_calc_mem_info(dmub_srv, &memory_params, fb_info);
+	if (status != DMUB_STATUS_OK) {
+		DRM_ERROR("Error calculating DMUB FB info: %d\n", status);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int dm_sw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	int r;
+
+	r = dm_dmub_sw_init(adev);
+	if (r)
+		return r;
+
+	return load_dmcu_fw(adev);
+}
+
+static int dm_sw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	kfree(adev->dm.dmub_fb_info);
+	adev->dm.dmub_fb_info = NULL;
+
+	if (adev->dm.dmub_srv) {
+		dmub_srv_destroy(adev->dm.dmub_srv);
+		adev->dm.dmub_srv = NULL;
+	}
+
+	amdgpu_ucode_release(&adev->dm.dmub_fw);
+	amdgpu_ucode_release(&adev->dm.fw_dmcu);
+
+	return 0;
+}
+
+static int detect_mst_link_for_all_connectors(struct drm_device *dev)
+{
+	struct amdgpu_dm_connector *aconnector;
+	struct drm_connector *connector;
+	struct drm_connector_list_iter iter;
+	int ret = 0;
+
+	drm_connector_list_iter_begin(dev, &iter);
+	drm_for_each_connector_iter(connector, &iter) {
+		aconnector = to_amdgpu_dm_connector(connector);
+		if (aconnector->dc_link->type == dc_connection_mst_branch &&
+		    aconnector->mst_mgr.aux) {
+			DRM_DEBUG_DRIVER("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+					 aconnector,
+					 aconnector->base.base.id);
+
+			ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+			if (ret < 0) {
+				DRM_ERROR("DM_MST: Failed to start MST\n");
+				aconnector->dc_link->type =
+					dc_connection_single;
+				ret = dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+								     aconnector->dc_link);
+				break;
+			}
+		}
+	}
+	drm_connector_list_iter_end(&iter);
+
+	return ret;
+}
+
+static int dm_late_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	struct dmcu_iram_parameters params;
+	unsigned int linear_lut[16];
+	int i;
+	struct dmcu *dmcu = NULL;
+
+	dmcu = adev->dm.dc->res_pool->dmcu;
+
+	for (i = 0; i < 16; i++)
+		linear_lut[i] = 0xFFFF * i / 15;
+
+	params.set = 0;
+	params.backlight_ramping_override = false;
+	params.backlight_ramping_start = 0xCCCC;
+	params.backlight_ramping_reduction = 0xCCCCCCCC;
+	params.backlight_lut_array_size = 16;
+	params.backlight_lut_array = linear_lut;
+
+	/* Min backlight level after ABM reduction. Don't allow below 1%:
+	 * 0xFFFF x 0.01 = 0x28F
+	 */
+	params.min_abm_backlight = 0x28F;
+	/* In the case where ABM is implemented on dmcub,
+	 * the dmcu object will be NULL.
+	 * ABM 2.4 and up are implemented on dmcub.
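+	 * The IRAM parameters are then packed into the DMUB ABM
+	 * configuration for each eDP link via dmub_init_abm_config() below.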
+	 */
+	if (dmcu) {
+		if (!dmcu_load_iram(dmcu, params))
+			return -EINVAL;
+	} else if (adev->dm.dc->ctx->dmub_srv) {
+		struct dc_link *edp_links[MAX_NUM_EDP];
+		int edp_num;
+
+		dc_get_edp_links(adev->dm.dc, edp_links, &edp_num);
+		for (i = 0; i < edp_num; i++) {
+			if (!dmub_init_abm_config(adev->dm.dc->res_pool, params, i))
+				return -EINVAL;
+		}
+	}
+
+	return detect_mst_link_for_all_connectors(adev_to_drm(adev));
+}
+
+static void resume_mst_branch_status(struct drm_dp_mst_topology_mgr *mgr)
+{
+	int ret;
+	u8 guid[16];
+	u64 tmp64;
+
+	mutex_lock(&mgr->lock);
+	if (!mgr->mst_primary)
+		goto out_fail;
+
+	if (drm_dp_read_dpcd_caps(mgr->aux, mgr->dpcd) < 0) {
+		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+		goto out_fail;
+	}
+
+	ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
+				 DP_MST_EN |
+				 DP_UP_REQ_EN |
+				 DP_UPSTREAM_IS_SRC);
+	if (ret < 0) {
+		drm_dbg_kms(mgr->dev, "mst write failed - undocked during suspend?\n");
+		goto out_fail;
+	}
+
+	/* Some hubs forget their GUIDs after they resume */
+	ret = drm_dp_dpcd_read(mgr->aux, DP_GUID, guid, 16);
+	if (ret != 16) {
+		drm_dbg_kms(mgr->dev, "dpcd read failed - undocked during suspend?\n");
+		goto out_fail;
+	}
+
+	if (memchr_inv(guid, 0, 16) == NULL) {
+		tmp64 = get_jiffies_64();
+		memcpy(&guid[0], &tmp64, sizeof(u64));
+		memcpy(&guid[8], &tmp64, sizeof(u64));
+
+		ret = drm_dp_dpcd_write(mgr->aux, DP_GUID, guid, 16);
+
+		if (ret != 16) {
+			drm_dbg_kms(mgr->dev, "check mstb guid failed - undocked during suspend?\n");
+			goto out_fail;
+		}
+	}
+
+	memcpy(mgr->mst_primary->guid, guid, 16);
+
+out_fail:
+	mutex_unlock(&mgr->lock);
+}
+
+static void s3_handle_mst(struct drm_device *dev, bool suspend)
+{
+	struct amdgpu_dm_connector *aconnector;
+	struct drm_connector *connector;
+	struct drm_connector_list_iter iter;
+	struct drm_dp_mst_topology_mgr *mgr;
+
+	drm_connector_list_iter_begin(dev, &iter);
+	drm_for_each_connector_iter(connector, &iter) {
+		aconnector = to_amdgpu_dm_connector(connector);
+		if (aconnector->dc_link->type != dc_connection_mst_branch ||
+		    aconnector->mst_root)
+			continue;
+
+		mgr = &aconnector->mst_mgr;
+
+		if (suspend) {
+			drm_dp_mst_topology_mgr_suspend(mgr);
+		} else {
+			/* If extended timeout is supported in hardware,
+			 * default to the LTTPR timeout (3.2ms) first as a W/A for the
+			 * DP link layer CTS 4.2.1.1 regression introduced by the CTS
+			 * specs requirement update.
+			 */
+			try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_LTTPR_TIMEOUT_PERIOD);
+			if (!dp_is_lttpr_present(aconnector->dc_link))
+				try_to_configure_aux_timeout(aconnector->dc_link->ddc, LINK_AUX_DEFAULT_TIMEOUT_PERIOD);
+
+			/* TODO: Move resume_mst_branch_status() back into the drm mst
+			 * resume path once topology probing is pulled out of mst resume
+			 * into a second resume step. That second step should be called
+			 * after the old state has been restored
+			 * (i.e. after drm_atomic_helper_resume()).
+			 */
+			resume_mst_branch_status(mgr);
+		}
+	}
+	drm_connector_list_iter_end(&iter);
+}
+
+static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
+{
+	int ret = 0;
+
+	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
+	 * depends on the Windows driver dc implementation.
+	 * For Navi1x, clock settings of dcn watermarks are fixed. The settings
+	 * should be passed to smu during boot up and resume from s3.
+	 * Boot up: dc calculates the dcn watermark clock settings within
+	 * dc_create, dcn20_resource_construct,
+	 * then calls the pplib functions below to pass the settings to smu:
+	 * smu_set_watermarks_for_clock_ranges
+	 * smu_set_watermarks_table
+	 * navi10_set_watermarks_table
+	 * smu_write_watermarks_table
+	 *
+	 * For Renoir, clock settings of dcn watermarks are also fixed values.
+	 * dc has implemented a different flow for the Windows driver:
+	 * dc_hardware_init / dc_set_power_state
+	 * dcn10_init_hw
+	 * notify_wm_ranges
+	 * set_wm_ranges
+	 * -- Linux
+	 * smu_set_watermarks_for_clock_ranges
+	 * renoir_set_watermarks_table
+	 * smu_write_watermarks_table
+	 *
+	 * For Linux,
+	 * dc_hardware_init -> amdgpu_dm_init
+	 * dc_set_power_state --> dm_resume
+	 *
+	 * Therefore, this function applies to navi10/12/14 but not Renoir.
+	 */
+	switch (adev->ip_versions[DCE_HWIP][0]) {
+	case IP_VERSION(2, 0, 2):
+	case IP_VERSION(2, 0, 0):
+		break;
+	default:
+		return 0;
+	}
+
+	ret = amdgpu_dpm_write_watermarks_table(adev);
+	if (ret) {
+		DRM_ERROR("Failed to update WMTABLE!\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * dm_hw_init() - Initialize DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Initialize the &struct amdgpu_display_manager device. This involves calling
+ * the initializers of each DM component, then populating the struct with them.
+ *
+ * Although the function implies hardware initialization, both hardware and
+ * software are initialized here. Splitting them out to their relevant init
+ * hooks is a future TODO item.
+ *
+ * Some notable things that are initialized here:
+ *
+ * - Display Core, both software and hardware
+ * - DC modules that we need (freesync and color management)
+ * - DRM software states
+ * - Interrupt sources and handlers
+ * - Vblank support
+ * - Debug FS entries, if enabled
+ */
+static int dm_hw_init(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+	/* Create DAL display manager */
+	amdgpu_dm_init(adev);
+	amdgpu_dm_hpd_init(adev);
+
+	return 0;
+}
+
+/**
+ * dm_hw_fini() - Teardown DC device
+ * @handle: The base driver device containing the amdgpu_dm device.
+ *
+ * Teardown components within &struct amdgpu_display_manager that require
+ * cleanup. This involves cleaning up the DRM device, DC, and any modules that
+ * were loaded. Also flush IRQ workqueues and disable them.
+ */
+static int dm_hw_fini(void *handle)
+{
+	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+	amdgpu_dm_hpd_fini(adev);
+
+	amdgpu_dm_irq_fini(adev);
+	amdgpu_dm_fini(adev);
+	return 0;
+}
+
+
+static void dm_gpureset_toggle_interrupts(struct amdgpu_device *adev,
+					  struct dc_state *state, bool enable)
+{
+	enum dc_irq_source irq_source;
+	struct amdgpu_crtc *acrtc;
+	int rc = -EBUSY;
+	int i = 0;
+
+	for (i = 0; i < state->stream_count; i++) {
+		acrtc = get_crtc_by_otg_inst(
+				adev, state->stream_status[i].primary_otg_inst);
+
+		if (acrtc && state->stream_status[i].plane_count != 0) {
+			irq_source = IRQ_TYPE_PFLIP + acrtc->otg_inst;
+			rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY;
+			if (rc)
+				DRM_WARN("Failed to %s pflip interrupts\n",
+					 enable ? "enable" : "disable");
+
+			if (enable) {
+				if (amdgpu_dm_crtc_vrr_active(to_dm_crtc_state(acrtc->base.state)))
+					rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, true);
+			} else
+				rc = amdgpu_dm_crtc_set_vupdate_irq(&acrtc->base, false);
+
+			if (rc)
+				DRM_WARN("Failed to %sable vupdate interrupt\n", enable ?
"en" : "dis"); + + irq_source = IRQ_TYPE_VBLANK + acrtc->otg_inst; + /* During gpu-reset we disable and then enable vblank irq, so + * don't use amdgpu_irq_get/put() to avoid refcount change. + */ + if (!dc_interrupt_set(adev->dm.dc, irq_source, enable)) + DRM_WARN("Failed to %sable vblank interrupt\n", enable ? "en" : "dis"); + } + } + +} + +static enum dc_status amdgpu_dm_commit_zero_streams(struct dc *dc) +{ + struct dc_state *context = NULL; + enum dc_status res = DC_ERROR_UNEXPECTED; + int i; + struct dc_stream_state *del_streams[MAX_PIPES]; + int del_streams_count = 0; + + memset(del_streams, 0, sizeof(del_streams)); + + context = dc_create_state(dc); + if (context == NULL) + goto context_alloc_fail; + + dc_resource_state_copy_construct_current(dc, context); + + /* First remove from context all streams */ + for (i = 0; i < context->stream_count; i++) { + struct dc_stream_state *stream = context->streams[i]; + + del_streams[del_streams_count++] = stream; + } + + /* Remove all planes for removed streams and then remove the streams */ + for (i = 0; i < del_streams_count; i++) { + if (!dc_rem_all_planes_for_stream(dc, del_streams[i], context)) { + res = DC_FAIL_DETACH_SURFACES; + goto fail; + } + + res = dc_remove_stream_from_ctx(dc, context, del_streams[i]); + if (res != DC_OK) + goto fail; + } + + res = dc_commit_streams(dc, context->streams, context->stream_count); + +fail: + dc_release_state(context); + +context_alloc_fail: + return res; +} + +static void hpd_rx_irq_work_suspend(struct amdgpu_display_manager *dm) +{ + int i; + + if (dm->hpd_rx_offload_wq) { + for (i = 0; i < dm->dc->caps.max_links; i++) + flush_workqueue(dm->hpd_rx_offload_wq[i].wq); + } +} + +static int dm_suspend(void *handle) +{ + struct amdgpu_device *adev = handle; + struct amdgpu_display_manager *dm = &adev->dm; + int ret = 0; + + if (amdgpu_in_reset(adev)) { + mutex_lock(&dm->dc_lock); + + dc_allow_idle_optimizations(adev->dm.dc, false); + + dm->cached_dc_state = dc_copy_state(dm->dc->current_state); + + dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false); + + amdgpu_dm_commit_zero_streams(dm->dc); + + amdgpu_dm_irq_suspend(adev); + + hpd_rx_irq_work_suspend(dm); + + return ret; + } + + WARN_ON(adev->dm.cached_state); + adev->dm.cached_state = drm_atomic_helper_suspend(adev_to_drm(adev)); + + s3_handle_mst(adev_to_drm(adev), true); + + amdgpu_dm_irq_suspend(adev); + + hpd_rx_irq_work_suspend(dm); + + dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D3); + + return 0; +} + +struct amdgpu_dm_connector * +amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, + struct drm_crtc *crtc) +{ + u32 i; + struct drm_connector_state *new_con_state; + struct drm_connector *connector; + struct drm_crtc *crtc_from_state; + + for_each_new_connector_in_state(state, connector, new_con_state, i) { + crtc_from_state = new_con_state->crtc; + + if (crtc_from_state == crtc) + return to_amdgpu_dm_connector(connector); + } + + return NULL; +} + +static void emulated_link_detect(struct dc_link *link) +{ + struct dc_sink_init_data sink_init_data = { 0 }; + struct display_sink_capability sink_caps = { 0 }; + enum dc_edid_status edid_status; + struct dc_context *dc_ctx = link->ctx; + struct dc_sink *sink = NULL; + struct dc_sink *prev_sink = NULL; + + link->type = dc_connection_none; + prev_sink = link->local_sink; + + if (prev_sink) + dc_sink_release(prev_sink); + + switch (link->connector_signal) { + case SIGNAL_TYPE_HDMI_TYPE_A: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + 
sink_caps.signal = SIGNAL_TYPE_HDMI_TYPE_A; + break; + } + + case SIGNAL_TYPE_DVI_SINGLE_LINK: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_DVI_SINGLE_LINK; + break; + } + + case SIGNAL_TYPE_DVI_DUAL_LINK: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_DVI_DUAL_LINK; + break; + } + + case SIGNAL_TYPE_LVDS: { + sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C; + sink_caps.signal = SIGNAL_TYPE_LVDS; + break; + } + + case SIGNAL_TYPE_EDP: { + sink_caps.transaction_type = + DDC_TRANSACTION_TYPE_I2C_OVER_AUX; + sink_caps.signal = SIGNAL_TYPE_EDP; + break; + } + + case SIGNAL_TYPE_DISPLAY_PORT: { + sink_caps.transaction_type = + DDC_TRANSACTION_TYPE_I2C_OVER_AUX; + sink_caps.signal = SIGNAL_TYPE_VIRTUAL; + break; + } + + default: + DC_ERROR("Invalid connector type! signal:%d\n", + link->connector_signal); + return; + } + + sink_init_data.link = link; + sink_init_data.sink_signal = sink_caps.signal; + + sink = dc_sink_create(&sink_init_data); + if (!sink) { + DC_ERROR("Failed to create sink!\n"); + return; + } + + /* dc_sink_create returns a new reference */ + link->local_sink = sink; + + edid_status = dm_helpers_read_local_edid( + link->ctx, + link, + sink); + + if (edid_status != EDID_OK) + DC_ERROR("Failed to read EDID"); + +} + +static void dm_gpureset_commit_state(struct dc_state *dc_state, + struct amdgpu_display_manager *dm) +{ + struct { + struct dc_surface_update surface_updates[MAX_SURFACES]; + struct dc_plane_info plane_infos[MAX_SURFACES]; + struct dc_scaling_info scaling_infos[MAX_SURFACES]; + struct dc_flip_addrs flip_addrs[MAX_SURFACES]; + struct dc_stream_update stream_update; + } *bundle; + int k, m; + + bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); + + if (!bundle) { + dm_error("Failed to allocate update bundle\n"); + goto cleanup; + } + + for (k = 0; k < dc_state->stream_count; k++) { + bundle->stream_update.stream = dc_state->streams[k]; + + for (m = 0; m < dc_state->stream_status->plane_count; m++) { + bundle->surface_updates[m].surface = + dc_state->stream_status->plane_states[m]; + bundle->surface_updates[m].surface->force_full_update = + true; + } + + update_planes_and_stream_adapter(dm->dc, + UPDATE_TYPE_FULL, + dc_state->stream_status->plane_count, + dc_state->streams[k], + &bundle->stream_update, + bundle->surface_updates); + } + +cleanup: + kfree(bundle); +} + +static int dm_resume(void *handle) +{ + struct amdgpu_device *adev = handle; + struct drm_device *ddev = adev_to_drm(adev); + struct amdgpu_display_manager *dm = &adev->dm; + struct amdgpu_dm_connector *aconnector; + struct drm_connector *connector; + struct drm_connector_list_iter iter; + struct drm_crtc *crtc; + struct drm_crtc_state *new_crtc_state; + struct dm_crtc_state *dm_new_crtc_state; + struct drm_plane *plane; + struct drm_plane_state *new_plane_state; + struct dm_plane_state *dm_new_plane_state; + struct dm_atomic_state *dm_state = to_dm_atomic_state(dm->atomic_obj.state); + enum dc_connection_type new_connection_type = dc_connection_none; + struct dc_state *dc_state; + int i, r, j, ret; + bool need_hotplug = false; + + if (amdgpu_in_reset(adev)) { + dc_state = dm->cached_dc_state; + + /* + * The dc->current_state is backed up into dm->cached_dc_state + * before we commit 0 streams. + * + * DC will clear link encoder assignments on the real state + * but the changes won't propagate over to the copy we made + * before the 0 streams commit. 
+	 *
+	 * DC expects that link encoder assignments are *not* valid
+	 * when committing a state, so as a workaround we can copy
+	 * off of the current state.
+	 *
+	 * We lose the previous assignments, but we had already
+	 * committed 0 streams anyway.
+	 */
+	link_enc_cfg_copy(adev->dm.dc->current_state, dc_state);
+
+	r = dm_dmub_hw_init(adev);
+	if (r)
+		DRM_ERROR("DMUB interface failed to initialize: status=%d\n", r);
+
+	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+	dc_resume(dm->dc);
+
+	amdgpu_dm_irq_resume_early(adev);
+
+	for (i = 0; i < dc_state->stream_count; i++) {
+		dc_state->streams[i]->mode_changed = true;
+		for (j = 0; j < dc_state->stream_status[i].plane_count; j++) {
+			dc_state->stream_status[i].plane_states[j]->update_flags.raw
+				= 0xffffffff;
+		}
+	}
+
+	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+		amdgpu_dm_outbox_init(adev);
+		dc_enable_dmub_outbox(adev->dm.dc);
+	}
+
+	WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
+
+	dm_gpureset_commit_state(dm->cached_dc_state, dm);
+
+	dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, true);
+
+	dc_release_state(dm->cached_dc_state);
+	dm->cached_dc_state = NULL;
+
+	amdgpu_dm_irq_resume_late(adev);
+
+	mutex_unlock(&dm->dc_lock);
+
+	return 0;
+	}
+	/* Recreate dc_state - DC invalidates it when setting power state to S3. */
+	dc_release_state(dm_state->context);
+	dm_state->context = dc_create_state(dm->dc);
+	/* TODO: Remove dc_state->dccg, use dc->dccg directly. */
+	dc_resource_state_construct(dm->dc, dm_state->context);
+
+	/* Before powering on DC we need to re-initialize DMUB. */
+	dm_dmub_hw_resume(adev);
+
+	/* Re-enable outbox interrupts for DPIA. */
+	if (dc_is_dmub_outbox_supported(adev->dm.dc)) {
+		amdgpu_dm_outbox_init(adev);
+		dc_enable_dmub_outbox(adev->dm.dc);
+	}
+
+	/* power on hardware */
+	dc_set_power_state(dm->dc, DC_ACPI_CM_POWER_STATE_D0);
+
+	/* program HPD filter */
+	dc_resume(dm->dc);
+
+	/*
+	 * early enable HPD Rx IRQ, should be done before set mode as short
+	 * pulse interrupts are used for MST
+	 */
+	amdgpu_dm_irq_resume_early(adev);
+
+	/* On resume we need to rewrite the MSTM control bits to enable MST */
+	s3_handle_mst(ddev, false);
+
+	/* Do detection */
+	drm_connector_list_iter_begin(ddev, &iter);
+	drm_for_each_connector_iter(connector, &iter) {
+		aconnector = to_amdgpu_dm_connector(connector);
+
+		if (!aconnector->dc_link)
+			continue;
+
+		/*
+		 * This is the case when traversing through already created
+		 * end-sink MST connectors; it should be skipped.
+		 */
+		if (aconnector && aconnector->mst_root)
+			continue;
+
+		mutex_lock(&aconnector->hpd_lock);
+		if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
+			DRM_ERROR("KMS: Failed to detect connector\n");
+
+		if (aconnector->base.force && new_connection_type == dc_connection_none) {
+			emulated_link_detect(aconnector->dc_link);
+		} else {
+			mutex_lock(&dm->dc_lock);
+			dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+			mutex_unlock(&dm->dc_lock);
+		}
+
+		if (aconnector->fake_enable && aconnector->dc_link->local_sink)
+			aconnector->fake_enable = false;
+
+		if (aconnector->dc_sink)
+			dc_sink_release(aconnector->dc_sink);
+		aconnector->dc_sink = NULL;
+		amdgpu_dm_update_connector_after_detect(aconnector);
+		mutex_unlock(&aconnector->hpd_lock);
+	}
+	drm_connector_list_iter_end(&iter);
+
+	/* Force mode set in atomic commit */
+	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i)
+		new_crtc_state->active_changed = true;
+
+	/*
+	 * atomic_check is expected to
create the dc states. We need to release
+	 * them here, since they were duplicated as part of the suspend
+	 * procedure.
+	 */
+	for_each_new_crtc_in_state(dm->cached_state, crtc, new_crtc_state, i) {
+		dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+		if (dm_new_crtc_state->stream) {
+			WARN_ON(kref_read(&dm_new_crtc_state->stream->refcount) > 1);
+			dc_stream_release(dm_new_crtc_state->stream);
+			dm_new_crtc_state->stream = NULL;
+		}
+	}
+
+	for_each_new_plane_in_state(dm->cached_state, plane, new_plane_state, i) {
+		dm_new_plane_state = to_dm_plane_state(new_plane_state);
+		if (dm_new_plane_state->dc_state) {
+			WARN_ON(kref_read(&dm_new_plane_state->dc_state->refcount) > 1);
+			dc_plane_state_release(dm_new_plane_state->dc_state);
+			dm_new_plane_state->dc_state = NULL;
+		}
+	}
+
+	drm_atomic_helper_resume(ddev, dm->cached_state);
+
+	dm->cached_state = NULL;
+
+	/* Do mst topology probing after resuming cached state */
+	drm_connector_list_iter_begin(ddev, &iter);
+	drm_for_each_connector_iter(connector, &iter) {
+		aconnector = to_amdgpu_dm_connector(connector);
+		if (aconnector->dc_link->type != dc_connection_mst_branch ||
+		    aconnector->mst_root)
+			continue;
+
+		ret = drm_dp_mst_topology_mgr_resume(&aconnector->mst_mgr, true);
+
+		if (ret < 0) {
+			dm_helpers_dp_mst_stop_top_mgr(aconnector->dc_link->ctx,
+					aconnector->dc_link);
+			need_hotplug = true;
+		}
+	}
+	drm_connector_list_iter_end(&iter);
+
+	if (need_hotplug)
+		drm_kms_helper_hotplug_event(ddev);
+
+	amdgpu_dm_irq_resume_late(adev);
+
+	amdgpu_dm_smu_write_watermarks_table(adev);
+
+	return 0;
+}
+
+/**
+ * DOC: DM Lifecycle
+ *
+ * DM (and consequently DC) is registered in the amdgpu base driver as an IP
+ * block. When CONFIG_DRM_AMD_DC is enabled, the DM device IP block is added to
+ * the base driver's device list to be initialized and torn down accordingly.
+ *
+ * The functions to do so are provided as hooks in &struct amd_ip_funcs.
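+ *
+ * The DM implementations of these hooks live in amdgpu_dm_funcs, which is
+ * exposed to the base driver through dm_ip_block below.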
+ */
+
+static const struct amd_ip_funcs amdgpu_dm_funcs = {
+	.name = "dm",
+	.early_init = dm_early_init,
+	.late_init = dm_late_init,
+	.sw_init = dm_sw_init,
+	.sw_fini = dm_sw_fini,
+	.early_fini = amdgpu_dm_early_fini,
+	.hw_init = dm_hw_init,
+	.hw_fini = dm_hw_fini,
+	.suspend = dm_suspend,
+	.resume = dm_resume,
+	.is_idle = dm_is_idle,
+	.wait_for_idle = dm_wait_for_idle,
+	.check_soft_reset = dm_check_soft_reset,
+	.soft_reset = dm_soft_reset,
+	.set_clockgating_state = dm_set_clockgating_state,
+	.set_powergating_state = dm_set_powergating_state,
+};
+
+const struct amdgpu_ip_block_version dm_ip_block = {
+	.type = AMD_IP_BLOCK_TYPE_DCE,
+	.major = 1,
+	.minor = 0,
+	.rev = 0,
+	.funcs = &amdgpu_dm_funcs,
+};
+
+
+/**
+ * DOC: atomic
+ *
+ * *WIP*
+ */
+
+static const struct drm_mode_config_funcs amdgpu_dm_mode_funcs = {
+	.fb_create = amdgpu_display_user_framebuffer_create,
+	.get_format_info = amdgpu_dm_plane_get_format_info,
+	.atomic_check = amdgpu_dm_atomic_check,
+	.atomic_commit = drm_atomic_helper_commit,
+};
+
+static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
+	.atomic_commit_tail = amdgpu_dm_atomic_commit_tail,
+	.atomic_commit_setup = drm_dp_mst_atomic_setup_commit,
+};
+
+static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
+{
+	struct amdgpu_dm_backlight_caps *caps;
+	struct drm_connector *conn_base;
+	struct amdgpu_device *adev;
+	struct drm_luminance_range_info *luminance_range;
+
+	if (aconnector->bl_idx == -1 ||
+	    aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP)
+		return;
+
+	conn_base = &aconnector->base;
+	adev = drm_to_adev(conn_base->dev);
+
+	caps = &adev->dm.backlight_caps[aconnector->bl_idx];
+	caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
+	caps->aux_support = false;
+
+	if (caps->ext_caps->bits.oled == 1
+	    /*
+	     * ||
+	     * caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+	     * caps->ext_caps->bits.hdr_aux_backlight_control == 1
+	     */)
+		caps->aux_support = true;
+
+	if (amdgpu_backlight == 0)
+		caps->aux_support = false;
+	else if (amdgpu_backlight == 1)
+		caps->aux_support = true;
+
+	luminance_range = &conn_base->display_info.luminance_range;
+
+	if (luminance_range->max_luminance) {
+		caps->aux_min_input_signal = luminance_range->min_luminance;
+		caps->aux_max_input_signal = luminance_range->max_luminance;
+	} else {
+		caps->aux_min_input_signal = 0;
+		caps->aux_max_input_signal = 512;
+	}
+}
+
+void amdgpu_dm_update_connector_after_detect(
+		struct amdgpu_dm_connector *aconnector)
+{
+	struct drm_connector *connector = &aconnector->base;
+	struct drm_device *dev = connector->dev;
+	struct dc_sink *sink;
+
+	/* MST handled by drm_mst framework */
+	if (aconnector->mst_mgr.mst_state == true)
+		return;
+
+	sink = aconnector->dc_link->local_sink;
+	if (sink)
+		dc_sink_retain(sink);
+
+	/*
+	 * Edid mgmt connector gets its first update only in the mode_valid
+	 * hook; the connector sink is then set to either the fake or the
+	 * physical sink, depending on the link status.
+	 * Skip if this was already done during boot.
+	 */
+	if (aconnector->base.force != DRM_FORCE_UNSPECIFIED
+			&& aconnector->dc_em_sink) {
+
+		/*
+		 * For S3 resume with headless, use the emulated sink
+		 * (dc_em_sink) to fake a stream, because on resume
+		 * connector->sink is set to NULL.
+		 */
+		mutex_lock(&dev->mode_config.mutex);
+
+		if (sink) {
+			if (aconnector->dc_sink) {
+				amdgpu_dm_update_freesync_caps(connector, NULL);
+				/*
+				 * The retain and release below bump up the sink's
+				 * refcount, because the link no longer points to it
+				 * after disconnect; otherwise the next crtc-to-connector
+				 * reshuffle by UMD would trigger an unwanted dc_sink
+				 * release.
+				 */
+				dc_sink_release(aconnector->dc_sink);
+			}
+			aconnector->dc_sink = sink;
+			dc_sink_retain(aconnector->dc_sink);
+			amdgpu_dm_update_freesync_caps(connector,
+					aconnector->edid);
+		} else {
+			amdgpu_dm_update_freesync_caps(connector, NULL);
+			if (!aconnector->dc_sink) {
+				aconnector->dc_sink = aconnector->dc_em_sink;
+				dc_sink_retain(aconnector->dc_sink);
+			}
+		}
+
+		mutex_unlock(&dev->mode_config.mutex);
+
+		if (sink)
+			dc_sink_release(sink);
+		return;
+	}
+
+	/*
+	 * TODO: temporary guard while we look for a proper fix:
+	 * if this sink is an MST sink, we should not do anything.
+	 */
+	if (sink && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+		dc_sink_release(sink);
+		return;
+	}
+
+	if (aconnector->dc_sink == sink) {
+		/*
+		 * We got a DP short pulse (Link Loss, DP CTS, etc...).
+		 * Do nothing!!
+		 */
+		DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: dc_sink didn't change.\n",
+				 aconnector->connector_id);
+		if (sink)
+			dc_sink_release(sink);
+		return;
+	}
+
+	DRM_DEBUG_DRIVER("DCHPD: connector_id=%d: Old sink=%p New sink=%p\n",
+			 aconnector->connector_id, aconnector->dc_sink, sink);
+
+	mutex_lock(&dev->mode_config.mutex);
+
+	/*
+	 * 1. Update status of the drm connector
+	 * 2. Send an event and let userspace tell us what to do
+	 */
+	if (sink) {
+		/*
+		 * TODO: check if we still need the S3 mode update workaround.
+		 * If yes, put it here.
+		 */
+		if (aconnector->dc_sink) {
+			amdgpu_dm_update_freesync_caps(connector, NULL);
+			dc_sink_release(aconnector->dc_sink);
+		}
+
+		aconnector->dc_sink = sink;
+		dc_sink_retain(aconnector->dc_sink);
+		if (sink->dc_edid.length == 0) {
+			aconnector->edid = NULL;
+			if (aconnector->dc_link->aux_mode) {
+				drm_dp_cec_unset_edid(
+					&aconnector->dm_dp_aux.aux);
+			}
+		} else {
+			aconnector->edid =
+				(struct edid *)sink->dc_edid.raw_edid;
+
+			if (aconnector->dc_link->aux_mode)
+				drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
+						    aconnector->edid);
+		}
+
+		if (!aconnector->timing_requested) {
+			aconnector->timing_requested =
+				kzalloc(sizeof(struct dc_crtc_timing), GFP_KERNEL);
+			if (!aconnector->timing_requested)
+				dm_error("failed to allocate aconnector->timing_requested\n");
+		}
+
+		drm_connector_update_edid_property(connector, aconnector->edid);
+		amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
+		update_connector_ext_caps(aconnector);
+	} else {
+		drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
+		amdgpu_dm_update_freesync_caps(connector, NULL);
+		drm_connector_update_edid_property(connector, NULL);
+		aconnector->num_modes = 0;
+		dc_sink_release(aconnector->dc_sink);
+		aconnector->dc_sink = NULL;
+		aconnector->edid = NULL;
+		kfree(aconnector->timing_requested);
+		aconnector->timing_requested = NULL;
+		/* Set CP to DESIRED if it was ENABLED, so we can re-enable it again on hotplug */
+		if (connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED)
+			connector->state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	update_subconnector_property(aconnector);
+
+	if (sink)
+		dc_sink_release(sink);
+}
+
+static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
+{
+	struct drm_connector *connector = &aconnector->base;
+	struct drm_device *dev = connector->dev;
+	enum dc_connection_type new_connection_type = dc_connection_none;
+	struct amdgpu_device *adev = drm_to_adev(dev);
+	struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
+	bool ret = false;
+
+	if (adev->dm.disable_hpd_irq)
+		return;
+
+	/*
+	 * In case of failure or MST, there is no need to update the connector
+	 * status or notify the OS, since (in the MST case) MST does this in
+	 * its own context.
+	 */
+	mutex_lock(&aconnector->hpd_lock);
+
+	if (adev->dm.hdcp_workqueue) {
+		hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
+		dm_con_state->update_hdcp = true;
+	}
+	if (aconnector->fake_enable)
+		aconnector->fake_enable = false;
+
+	aconnector->timing_changed = false;
+
+	if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type))
+		DRM_ERROR("KMS: Failed to detect connector\n");
+
+	if (aconnector->base.force && new_connection_type == dc_connection_none) {
+		emulated_link_detect(aconnector->dc_link);
+
+		drm_modeset_lock_all(dev);
+		dm_restore_drm_connector_state(dev, connector);
+		drm_modeset_unlock_all(dev);
+
+		if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+			drm_kms_helper_connector_hotplug_event(connector);
+	} else {
+		mutex_lock(&adev->dm.dc_lock);
+		ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+		mutex_unlock(&adev->dm.dc_lock);
+		if (ret) {
+			amdgpu_dm_update_connector_after_detect(aconnector);
+
+			drm_modeset_lock_all(dev);
+			dm_restore_drm_connector_state(dev, connector);
+			drm_modeset_unlock_all(dev);
+
+			if (aconnector->base.force == DRM_FORCE_UNSPECIFIED)
+				drm_kms_helper_connector_hotplug_event(connector);
+		}
+	}
+	mutex_unlock(&aconnector->hpd_lock);
+
+}
+
+static void handle_hpd_irq(void *param)
+{
+	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+
+	handle_hpd_irq_helper(aconnector);
+
+}
+
+static void schedule_hpd_rx_offload_work(struct hpd_rx_irq_offload_work_queue *offload_wq,
+							union hpd_irq_data hpd_irq_data)
+{
+	struct hpd_rx_irq_offload_work *offload_work =
+				kzalloc(sizeof(*offload_work), GFP_KERNEL);
+
+	if (!offload_work) {
+		DRM_ERROR("Failed to allocate hpd_rx_irq_offload_work.\n");
+		return;
+	}
+
+	INIT_WORK(&offload_work->work, dm_handle_hpd_rx_offload_work);
+	offload_work->data = hpd_irq_data;
+	offload_work->offload_wq = offload_wq;
+
+	queue_work(offload_wq->wq, &offload_work->work);
+	DRM_DEBUG_KMS("queue work to handle hpd_rx offload work");
+}
+
+static void handle_hpd_rx_irq(void *param)
+{
+	struct amdgpu_dm_connector *aconnector = (struct amdgpu_dm_connector *)param;
+	struct drm_connector *connector = &aconnector->base;
+	struct drm_device *dev = connector->dev;
+	struct dc_link *dc_link = aconnector->dc_link;
+	bool is_mst_root_connector = aconnector->mst_mgr.mst_state;
+	bool result = false;
+	enum dc_connection_type new_connection_type = dc_connection_none;
+	struct amdgpu_device *adev = drm_to_adev(dev);
+	union hpd_irq_data hpd_irq_data;
+	bool link_loss = false;
+	bool has_left_work = false;
+	int idx = dc_link->link_index;
+	struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
+
+	memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
+
+	if (adev->dm.disable_hpd_irq)
+		return;
+
+	/*
+	 * TODO: This mutex temporarily guards against the HPD interrupt
+	 * conflicting with the GPIO; once an i2c helper is implemented,
+	 * this mutex should be retired.
+ */ + mutex_lock(&aconnector->hpd_lock); + + result = dc_link_handle_hpd_rx_irq(dc_link, &hpd_irq_data, + &link_loss, true, &has_left_work); + + if (!has_left_work) + goto out; + + if (hpd_irq_data.bytes.device_service_irq.bits.AUTOMATED_TEST) { + schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); + goto out; + } + + if (dc_link_dp_allow_hpd_rx_irq(dc_link)) { + if (hpd_irq_data.bytes.device_service_irq.bits.UP_REQ_MSG_RDY || + hpd_irq_data.bytes.device_service_irq.bits.DOWN_REP_MSG_RDY) { + bool skip = false; + + /* + * DOWN_REP_MSG_RDY is also handled by polling method + * mgr->cbs->poll_hpd_irq() + */ + spin_lock(&offload_wq->offload_lock); + skip = offload_wq->is_handling_mst_msg_rdy_event; + + if (!skip) + offload_wq->is_handling_mst_msg_rdy_event = true; + + spin_unlock(&offload_wq->offload_lock); + + if (!skip) + schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); + + goto out; + } + + if (link_loss) { + bool skip = false; + + spin_lock(&offload_wq->offload_lock); + skip = offload_wq->is_handling_link_loss; + + if (!skip) + offload_wq->is_handling_link_loss = true; + + spin_unlock(&offload_wq->offload_lock); + + if (!skip) + schedule_hpd_rx_offload_work(offload_wq, hpd_irq_data); + + goto out; + } + } + +out: + if (result && !is_mst_root_connector) { + /* Downstream Port status changed. */ + if (!dc_link_detect_connection_type(dc_link, &new_connection_type)) + DRM_ERROR("KMS: Failed to detect connector\n"); + + if (aconnector->base.force && new_connection_type == dc_connection_none) { + emulated_link_detect(dc_link); + + if (aconnector->fake_enable) + aconnector->fake_enable = false; + + amdgpu_dm_update_connector_after_detect(aconnector); + + + drm_modeset_lock_all(dev); + dm_restore_drm_connector_state(dev, connector); + drm_modeset_unlock_all(dev); + + drm_kms_helper_connector_hotplug_event(connector); + } else { + bool ret = false; + + mutex_lock(&adev->dm.dc_lock); + ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX); + mutex_unlock(&adev->dm.dc_lock); + + if (ret) { + if (aconnector->fake_enable) + aconnector->fake_enable = false; + + amdgpu_dm_update_connector_after_detect(aconnector); + + drm_modeset_lock_all(dev); + dm_restore_drm_connector_state(dev, connector); + drm_modeset_unlock_all(dev); + + drm_kms_helper_connector_hotplug_event(connector); + } + } + } + if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) { + if (adev->dm.hdcp_workqueue) + hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index); + } + + if (dc_link->type != dc_connection_mst_branch) + drm_dp_cec_irq(&aconnector->dm_dp_aux.aux); + + mutex_unlock(&aconnector->hpd_lock); +} + +static void register_hpd_handlers(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev_to_drm(adev); + struct drm_connector *connector; + struct amdgpu_dm_connector *aconnector; + const struct dc_link *dc_link; + struct dc_interrupt_params int_params = {0}; + + int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; + int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; + + list_for_each_entry(connector, + &dev->mode_config.connector_list, head) { + + aconnector = to_amdgpu_dm_connector(connector); + dc_link = aconnector->dc_link; + + if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { + int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; + int_params.irq_source = dc_link->irq_source_hpd; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + handle_hpd_irq, + (void *) aconnector); + } + + if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { + + /* Also register for DP short 
pulse (hpd_rx). */ + int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; + int_params.irq_source = dc_link->irq_source_hpd_rx; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + handle_hpd_rx_irq, + (void *) aconnector); + } + + if (adev->dm.hpd_rx_offload_wq) + adev->dm.hpd_rx_offload_wq[connector->index].aconnector = + aconnector; + } +} + +#if defined(CONFIG_DRM_AMD_DC_SI) +/* Register IRQ sources and initialize IRQ callbacks */ +static int dce60_register_irq_handlers(struct amdgpu_device *adev) +{ + struct dc *dc = adev->dm.dc; + struct common_irq_params *c_irq_params; + struct dc_interrupt_params int_params = {0}; + int r; + int i; + unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; + + int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; + int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; + + /* + * Actions of amdgpu_irq_add_id(): + * 1. Register a set() function with base driver. + * Base driver will call set() function to enable/disable an + * interrupt in DC hardware. + * 2. Register amdgpu_dm_irq_handler(). + * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts + * coming from DC hardware. + * amdgpu_dm_irq_handler() will re-direct the interrupt to DC + * for acknowledging and handling. + */ + + /* Use VBLANK interrupt */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + r = amdgpu_irq_add_id(adev, client_id, i + 1, &adev->crtc_irq); + if (r) { + DRM_ERROR("Failed to add crtc irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i + 1, 0); + + c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_crtc_high_irq, c_irq_params); + } + + /* Use GRPH_PFLIP interrupt */ + for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; + i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { + r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); + if (r) { + DRM_ERROR("Failed to add page flip irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_pflip_high_irq, c_irq_params); + + } + + /* HPD */ + r = amdgpu_irq_add_id(adev, client_id, + VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); + if (r) { + DRM_ERROR("Failed to add hpd irq id!\n"); + return r; + } + + register_hpd_handlers(adev); + + return 0; +} +#endif + +/* Register IRQ sources and initialize IRQ callbacks */ +static int dce110_register_irq_handlers(struct amdgpu_device *adev) +{ + struct dc *dc = adev->dm.dc; + struct common_irq_params *c_irq_params; + struct dc_interrupt_params int_params = {0}; + int r; + int i; + unsigned int client_id = AMDGPU_IRQ_CLIENTID_LEGACY; + + if (adev->family >= AMDGPU_FAMILY_AI) + client_id = SOC15_IH_CLIENTID_DCE; + + int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; + int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; + + /* + * Actions of amdgpu_irq_add_id(): + * 1. Register a set() function with base driver. + * Base driver will call set() function to enable/disable an + * interrupt in DC hardware. + * 2. Register amdgpu_dm_irq_handler(). 
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts + * coming from DC hardware. + * amdgpu_dm_irq_handler() will re-direct the interrupt to DC + * for acknowledging and handling. + */ + + /* Use VBLANK interrupt */ + for (i = VISLANDS30_IV_SRCID_D1_VERTICAL_INTERRUPT0; i <= VISLANDS30_IV_SRCID_D6_VERTICAL_INTERRUPT0; i++) { + r = amdgpu_irq_add_id(adev, client_id, i, &adev->crtc_irq); + if (r) { + DRM_ERROR("Failed to add crtc irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_crtc_high_irq, c_irq_params); + } + + /* Use VUPDATE interrupt */ + for (i = VISLANDS30_IV_SRCID_D1_V_UPDATE_INT; i <= VISLANDS30_IV_SRCID_D6_V_UPDATE_INT; i += 2) { + r = amdgpu_irq_add_id(adev, client_id, i, &adev->vupdate_irq); + if (r) { + DRM_ERROR("Failed to add vupdate irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_vupdate_high_irq, c_irq_params); + } + + /* Use GRPH_PFLIP interrupt */ + for (i = VISLANDS30_IV_SRCID_D1_GRPH_PFLIP; + i <= VISLANDS30_IV_SRCID_D6_GRPH_PFLIP; i += 2) { + r = amdgpu_irq_add_id(adev, client_id, i, &adev->pageflip_irq); + if (r) { + DRM_ERROR("Failed to add page flip irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_pflip_high_irq, c_irq_params); + + } + + /* HPD */ + r = amdgpu_irq_add_id(adev, client_id, + VISLANDS30_IV_SRCID_HOTPLUG_DETECT_A, &adev->hpd_irq); + if (r) { + DRM_ERROR("Failed to add hpd irq id!\n"); + return r; + } + + register_hpd_handlers(adev); + + return 0; +} + +/* Register IRQ sources and initialize IRQ callbacks */ +static int dcn10_register_irq_handlers(struct amdgpu_device *adev) +{ + struct dc *dc = adev->dm.dc; + struct common_irq_params *c_irq_params; + struct dc_interrupt_params int_params = {0}; + int r; + int i; +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + static const unsigned int vrtl_int_srcid[] = { + DCN_1_0__SRCID__OTG1_VERTICAL_INTERRUPT0_CONTROL, + DCN_1_0__SRCID__OTG2_VERTICAL_INTERRUPT0_CONTROL, + DCN_1_0__SRCID__OTG3_VERTICAL_INTERRUPT0_CONTROL, + DCN_1_0__SRCID__OTG4_VERTICAL_INTERRUPT0_CONTROL, + DCN_1_0__SRCID__OTG5_VERTICAL_INTERRUPT0_CONTROL, + DCN_1_0__SRCID__OTG6_VERTICAL_INTERRUPT0_CONTROL + }; +#endif + + int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; + int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; + + /* + * Actions of amdgpu_irq_add_id(): + * 1. Register a set() function with base driver. + * Base driver will call set() function to enable/disable an + * interrupt in DC hardware. + * 2. Register amdgpu_dm_irq_handler(). 
+ * Base driver will call amdgpu_dm_irq_handler() for ALL interrupts + * coming from DC hardware. + * amdgpu_dm_irq_handler() will re-direct the interrupt to DC + * for acknowledging and handling. + */ + + /* Use VSTARTUP interrupt */ + for (i = DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP; + i <= DCN_1_0__SRCID__DC_D1_OTG_VSTARTUP + adev->mode_info.num_crtc - 1; + i++) { + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->crtc_irq); + + if (r) { + DRM_ERROR("Failed to add crtc irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.vblank_params[int_params.irq_source - DC_IRQ_SOURCE_VBLANK1]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt( + adev, &int_params, dm_crtc_high_irq, c_irq_params); + } + + /* Use otg vertical line interrupt */ +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + for (i = 0; i <= adev->mode_info.num_crtc - 1; i++) { + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, + vrtl_int_srcid[i], &adev->vline0_irq); + + if (r) { + DRM_ERROR("Failed to add vline0 irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, vrtl_int_srcid[i], 0); + + if (int_params.irq_source == DC_IRQ_SOURCE_INVALID) { + DRM_ERROR("Failed to register vline0 irq %d!\n", vrtl_int_srcid[i]); + break; + } + + c_irq_params = &adev->dm.vline0_params[int_params.irq_source + - DC_IRQ_SOURCE_DC1_VLINE0]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_dcn_vertical_interrupt0_high_irq, c_irq_params); + } +#endif + + /* Use VUPDATE_NO_LOCK interrupt on DCN, which seems to correspond to + * the regular VUPDATE interrupt on DCE. We want DC_IRQ_SOURCE_VUPDATEx + * to trigger at end of each vblank, regardless of state of the lock, + * matching DCE behaviour. 
+ */ + for (i = DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT; + i <= DCN_1_0__SRCID__OTG0_IHC_V_UPDATE_NO_LOCK_INTERRUPT + adev->mode_info.num_crtc - 1; + i++) { + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->vupdate_irq); + + if (r) { + DRM_ERROR("Failed to add vupdate irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.vupdate_params[int_params.irq_source - DC_IRQ_SOURCE_VUPDATE1]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_vupdate_high_irq, c_irq_params); + } + + /* Use GRPH_PFLIP interrupt */ + for (i = DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT; + i <= DCN_1_0__SRCID__HUBP0_FLIP_INTERRUPT + dc->caps.max_otg_num - 1; + i++) { + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, i, &adev->pageflip_irq); + if (r) { + DRM_ERROR("Failed to add page flip irq id!\n"); + return r; + } + + int_params.int_context = INTERRUPT_HIGH_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.pflip_params[int_params.irq_source - DC_IRQ_SOURCE_PFLIP_FIRST]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_pflip_high_irq, c_irq_params); + + } + + /* HPD */ + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DC_HPD1_INT, + &adev->hpd_irq); + if (r) { + DRM_ERROR("Failed to add hpd irq id!\n"); + return r; + } + + register_hpd_handlers(adev); + + return 0; +} +/* Register Outbox IRQ sources and initialize IRQ callbacks */ +static int register_outbox_irq_handlers(struct amdgpu_device *adev) +{ + struct dc *dc = adev->dm.dc; + struct common_irq_params *c_irq_params; + struct dc_interrupt_params int_params = {0}; + int r, i; + + int_params.requested_polarity = INTERRUPT_POLARITY_DEFAULT; + int_params.current_polarity = INTERRUPT_POLARITY_DEFAULT; + + r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_DCE, DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT, + &adev->dmub_outbox_irq); + if (r) { + DRM_ERROR("Failed to add outbox irq id!\n"); + return r; + } + + if (dc->ctx->dmub_srv) { + i = DCN_1_0__SRCID__DMCUB_OUTBOX_LOW_PRIORITY_READY_INT; + int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT; + int_params.irq_source = + dc_interrupt_to_irq_source(dc, i, 0); + + c_irq_params = &adev->dm.dmub_outbox_params[0]; + + c_irq_params->adev = adev; + c_irq_params->irq_src = int_params.irq_source; + + amdgpu_dm_irq_register_interrupt(adev, &int_params, + dm_dmub_outbox1_low_irq, c_irq_params); + } + + return 0; +} + +/* + * Acquires the lock for the atomic state object and returns + * the new atomic state. + * + * This should only be called during atomic check. 
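+ *
+ * A minimal usage sketch (the caller context is hypothetical, shown for
+ * illustration only):
+ *
+ *	struct dm_atomic_state *dm_state = NULL;
+ *	int ret = dm_atomic_get_state(state, &dm_state);
+ *
+ *	if (ret)
+ *		return ret;
+ *
+ * On success, *dm_state points at the DM private state for this commit,
+ * with the lock noted above held by the atomic state.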
+ */ +int dm_atomic_get_state(struct drm_atomic_state *state, + struct dm_atomic_state **dm_state) +{ + struct drm_device *dev = state->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_display_manager *dm = &adev->dm; + struct drm_private_state *priv_state; + + if (*dm_state) + return 0; + + priv_state = drm_atomic_get_private_obj_state(state, &dm->atomic_obj); + if (IS_ERR(priv_state)) + return PTR_ERR(priv_state); + + *dm_state = to_dm_atomic_state(priv_state); + + return 0; +} + +static struct dm_atomic_state * +dm_atomic_get_new_state(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_display_manager *dm = &adev->dm; + struct drm_private_obj *obj; + struct drm_private_state *new_obj_state; + int i; + + for_each_new_private_obj_in_state(state, obj, new_obj_state, i) { + if (obj->funcs == dm->atomic_obj.funcs) + return to_dm_atomic_state(new_obj_state); + } + + return NULL; +} + +static struct drm_private_state * +dm_atomic_duplicate_state(struct drm_private_obj *obj) +{ + struct dm_atomic_state *old_state, *new_state; + + new_state = kzalloc(sizeof(*new_state), GFP_KERNEL); + if (!new_state) + return NULL; + + __drm_atomic_helper_private_obj_duplicate_state(obj, &new_state->base); + + old_state = to_dm_atomic_state(obj->state); + + if (old_state && old_state->context) + new_state->context = dc_copy_state(old_state->context); + + if (!new_state->context) { + kfree(new_state); + return NULL; + } + + return &new_state->base; +} + +static void dm_atomic_destroy_state(struct drm_private_obj *obj, + struct drm_private_state *state) +{ + struct dm_atomic_state *dm_state = to_dm_atomic_state(state); + + if (dm_state && dm_state->context) + dc_release_state(dm_state->context); + + kfree(dm_state); +} + +static struct drm_private_state_funcs dm_atomic_state_funcs = { + .atomic_duplicate_state = dm_atomic_duplicate_state, + .atomic_destroy_state = dm_atomic_destroy_state, +}; + +static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev) +{ + struct dm_atomic_state *state; + int r; + + adev->mode_info.mode_config_initialized = true; + + adev_to_drm(adev)->mode_config.funcs = (void *)&amdgpu_dm_mode_funcs; + adev_to_drm(adev)->mode_config.helper_private = &amdgpu_dm_mode_config_helperfuncs; + + adev_to_drm(adev)->mode_config.max_width = 16384; + adev_to_drm(adev)->mode_config.max_height = 16384; + + adev_to_drm(adev)->mode_config.preferred_depth = 24; + if (adev->asic_type == CHIP_HAWAII) + /* disable prefer shadow for now due to hibernation issues */ + adev_to_drm(adev)->mode_config.prefer_shadow = 0; + else + adev_to_drm(adev)->mode_config.prefer_shadow = 1; + /* indicates support for immediate flip */ + adev_to_drm(adev)->mode_config.async_page_flip = true; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return -ENOMEM; + + state->context = dc_create_state(adev->dm.dc); + if (!state->context) { + kfree(state); + return -ENOMEM; + } + + dc_resource_state_copy_construct_current(adev->dm.dc, state->context); + + drm_atomic_private_obj_init(adev_to_drm(adev), + &adev->dm.atomic_obj, + &state->base, + &dm_atomic_state_funcs); + + r = amdgpu_display_modeset_create_props(adev); + if (r) { + dc_release_state(state->context); + kfree(state); + return r; + } + + r = amdgpu_dm_audio_init(adev); + if (r) { + dc_release_state(state->context); + kfree(state); + return r; + } + + return 0; +} + +#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12 +#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 
255 +#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50 + +static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm, + int bl_idx) +{ +#if defined(CONFIG_ACPI) + struct amdgpu_dm_backlight_caps caps; + + memset(&caps, 0, sizeof(caps)); + + if (dm->backlight_caps[bl_idx].caps_valid) + return; + + amdgpu_acpi_get_backlight_caps(&caps); + if (caps.caps_valid) { + dm->backlight_caps[bl_idx].caps_valid = true; + if (caps.aux_support) + return; + dm->backlight_caps[bl_idx].min_input_signal = caps.min_input_signal; + dm->backlight_caps[bl_idx].max_input_signal = caps.max_input_signal; + } else { + dm->backlight_caps[bl_idx].min_input_signal = + AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; + dm->backlight_caps[bl_idx].max_input_signal = + AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; + } +#else + if (dm->backlight_caps[bl_idx].aux_support) + return; + + dm->backlight_caps[bl_idx].min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT; + dm->backlight_caps[bl_idx].max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT; +#endif +} + +static int get_brightness_range(const struct amdgpu_dm_backlight_caps *caps, + unsigned int *min, unsigned int *max) +{ + if (!caps) + return 0; + + if (caps->aux_support) { + // Firmware limits are in nits, DC API wants millinits. + *max = 1000 * caps->aux_max_input_signal; + *min = 1000 * caps->aux_min_input_signal; + } else { + // Firmware limits are 8-bit, PWM control is 16-bit. + *max = 0x101 * caps->max_input_signal; + *min = 0x101 * caps->min_input_signal; + } + return 1; +} + +static u32 convert_brightness_from_user(const struct amdgpu_dm_backlight_caps *caps, + uint32_t brightness) +{ + unsigned int min, max; + + if (!get_brightness_range(caps, &min, &max)) + return brightness; + + // Rescale 0..255 to min..max + return min + DIV_ROUND_CLOSEST((max - min) * brightness, + AMDGPU_MAX_BL_LEVEL); +} + +static u32 convert_brightness_to_user(const struct amdgpu_dm_backlight_caps *caps, + uint32_t brightness) +{ + unsigned int min, max; + + if (!get_brightness_range(caps, &min, &max)) + return brightness; + + if (brightness < min) + return 0; + // Rescale min..max to 0..255 + return DIV_ROUND_CLOSEST(AMDGPU_MAX_BL_LEVEL * (brightness - min), + max - min); +} + +static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, + int bl_idx, + u32 user_brightness) +{ + struct amdgpu_dm_backlight_caps caps; + struct dc_link *link; + u32 brightness; + bool rc; + + amdgpu_dm_update_backlight_caps(dm, bl_idx); + caps = dm->backlight_caps[bl_idx]; + + dm->brightness[bl_idx] = user_brightness; + /* update scratch register */ + if (bl_idx == 0) + amdgpu_atombios_scratch_regs_set_backlight_level(dm->adev, dm->brightness[bl_idx]); + brightness = convert_brightness_from_user(&caps, dm->brightness[bl_idx]); + link = (struct dc_link *)dm->backlight_link[bl_idx]; + + /* Change brightness based on AUX property */ + if (caps.aux_support) { + rc = dc_link_set_backlight_level_nits(link, true, brightness, + AUX_BL_DEFAULT_TRANSITION_TIME_MS); + if (!rc) + DRM_DEBUG("DM: Failed to update backlight via AUX on eDP[%d]\n", bl_idx); + } else { + rc = dc_link_set_backlight_level(link, brightness, 0); + if (!rc) + DRM_DEBUG("DM: Failed to update backlight on eDP[%d]\n", bl_idx); + } + + if (rc) + dm->actual_brightness[bl_idx] = user_brightness; +} + +static int amdgpu_dm_backlight_update_status(struct backlight_device *bd) +{ + struct amdgpu_display_manager *dm = bl_get_data(bd); + int i; + + for (i = 0; i < dm->num_of_edps; i++) { + if (bd == dm->backlight_dev[i]) + break; + } + if (i >= 
AMDGPU_DM_MAX_NUM_EDP) + i = 0; + amdgpu_dm_backlight_set_level(dm, i, bd->props.brightness); + + return 0; +} + +static u32 amdgpu_dm_backlight_get_level(struct amdgpu_display_manager *dm, + int bl_idx) +{ + int ret; + struct amdgpu_dm_backlight_caps caps; + struct dc_link *link = (struct dc_link *)dm->backlight_link[bl_idx]; + + amdgpu_dm_update_backlight_caps(dm, bl_idx); + caps = dm->backlight_caps[bl_idx]; + + if (caps.aux_support) { + u32 avg, peak; + bool rc; + + rc = dc_link_get_backlight_level_nits(link, &avg, &peak); + if (!rc) + return dm->brightness[bl_idx]; + return convert_brightness_to_user(&caps, avg); + } + + ret = dc_link_get_backlight_level(link); + + if (ret == DC_ERROR_UNEXPECTED) + return dm->brightness[bl_idx]; + + return convert_brightness_to_user(&caps, ret); +} + +static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd) +{ + struct amdgpu_display_manager *dm = bl_get_data(bd); + int i; + + for (i = 0; i < dm->num_of_edps; i++) { + if (bd == dm->backlight_dev[i]) + break; + } + if (i >= AMDGPU_DM_MAX_NUM_EDP) + i = 0; + return amdgpu_dm_backlight_get_level(dm, i); +} + +static const struct backlight_ops amdgpu_dm_backlight_ops = { + .options = BL_CORE_SUSPENDRESUME, + .get_brightness = amdgpu_dm_backlight_get_brightness, + .update_status = amdgpu_dm_backlight_update_status, +}; + +static void +amdgpu_dm_register_backlight_device(struct amdgpu_dm_connector *aconnector) +{ + struct drm_device *drm = aconnector->base.dev; + struct amdgpu_display_manager *dm = &drm_to_adev(drm)->dm; + struct backlight_properties props = { 0 }; + char bl_name[16]; + + if (aconnector->bl_idx == -1) + return; + + if (!acpi_video_backlight_use_native()) { + drm_info(drm, "Skipping amdgpu DM backlight registration\n"); + /* Try registering an ACPI video backlight device instead. */ + acpi_video_register_backlight(); + return; + } + + props.max_brightness = AMDGPU_MAX_BL_LEVEL; + props.brightness = AMDGPU_MAX_BL_LEVEL; + props.type = BACKLIGHT_RAW; + + snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d", + drm->primary->index + aconnector->bl_idx); + + dm->backlight_dev[aconnector->bl_idx] = + backlight_device_register(bl_name, aconnector->base.kdev, dm, + &amdgpu_dm_backlight_ops, &props); + + if (IS_ERR(dm->backlight_dev[aconnector->bl_idx])) { + DRM_ERROR("DM: Backlight registration failed!\n"); + dm->backlight_dev[aconnector->bl_idx] = NULL; + } else + DRM_DEBUG_DRIVER("DM: Registered Backlight device: %s\n", bl_name); +} + +static int initialize_plane(struct amdgpu_display_manager *dm, + struct amdgpu_mode_info *mode_info, int plane_id, + enum drm_plane_type plane_type, + const struct dc_plane_cap *plane_cap) +{ + struct drm_plane *plane; + unsigned long possible_crtcs; + int ret = 0; + + plane = kzalloc(sizeof(struct drm_plane), GFP_KERNEL); + if (!plane) { + DRM_ERROR("KMS: Failed to allocate plane\n"); + return -ENOMEM; + } + plane->type = plane_type; + + /* + * HACK: IGT tests expect that the primary plane for a CRTC + * can only have one possible CRTC. Only expose support for + * any CRTC if they're not going to be used as a primary plane + * for a CRTC - like overlay or underlay planes. 
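+	 *
+	 * Concretely: a primary plane with plane_id 2 is tied to CRTC 2 only
+	 * (possible_crtcs = 1 << 2 = 0x4), while any plane_id at or beyond
+	 * dc->caps.max_streams gets the catch-all mask 0xff.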
+	 */
+	possible_crtcs = 1 << plane_id;
+	if (plane_id >= dm->dc->caps.max_streams)
+		possible_crtcs = 0xff;
+
+	ret = amdgpu_dm_plane_init(dm, plane, possible_crtcs, plane_cap);
+
+	if (ret) {
+		DRM_ERROR("KMS: Failed to initialize plane\n");
+		kfree(plane);
+		return ret;
+	}
+
+	if (mode_info)
+		mode_info->planes[plane_id] = plane;
+
+	return ret;
+}
+
+
+static void setup_backlight_device(struct amdgpu_display_manager *dm,
+				   struct amdgpu_dm_connector *aconnector)
+{
+	struct dc_link *link = aconnector->dc_link;
+	int bl_idx = dm->num_of_edps;
+
+	if (!(link->connector_signal & (SIGNAL_TYPE_EDP | SIGNAL_TYPE_LVDS)) ||
+	    link->type == dc_connection_none)
+		return;
+
+	if (dm->num_of_edps >= AMDGPU_DM_MAX_NUM_EDP) {
+		drm_warn(adev_to_drm(dm->adev), "Too many eDP connections, skipping backlight setup for additional eDPs\n");
+		return;
+	}
+
+	aconnector->bl_idx = bl_idx;
+
+	amdgpu_dm_update_backlight_caps(dm, bl_idx);
+	dm->brightness[bl_idx] = AMDGPU_MAX_BL_LEVEL;
+	dm->backlight_link[bl_idx] = link;
+	dm->num_of_edps++;
+
+	update_connector_ext_caps(aconnector);
+}
+
+static void amdgpu_set_panel_orientation(struct drm_connector *connector);
+
+/*
+ * In this architecture, the association
+ * connector -> encoder -> crtc
+ * is not really required. The crtc and connector will hold the
+ * display_index as an abstraction to use with the DAL component.
+ *
+ * Returns 0 on success
+ */
+static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
+{
+	struct amdgpu_display_manager *dm = &adev->dm;
+	s32 i;
+	struct amdgpu_dm_connector *aconnector = NULL;
+	struct amdgpu_encoder *aencoder = NULL;
+	struct amdgpu_mode_info *mode_info = &adev->mode_info;
+	u32 link_cnt;
+	s32 primary_planes;
+	enum dc_connection_type new_connection_type = dc_connection_none;
+	const struct dc_plane_cap *plane;
+	bool psr_feature_enabled = false;
+	bool replay_feature_enabled = false;
+	int max_overlay = dm->dc->caps.max_slave_planes;
+
+	dm->display_indexes_num = dm->dc->caps.max_streams;
+	/* Update the actual used number of crtc */
+	adev->mode_info.num_crtc = adev->dm.display_indexes_num;
+
+	amdgpu_dm_set_irq_funcs(adev);
+
+	link_cnt = dm->dc->caps.max_links;
+	if (amdgpu_dm_mode_config_init(dm->adev)) {
+		DRM_ERROR("DM: Failed to initialize mode config\n");
+		return -EINVAL;
+	}
+
+	/* There is one primary plane per CRTC */
+	primary_planes = dm->dc->caps.max_streams;
+	ASSERT(primary_planes <= AMDGPU_MAX_PLANES);
+
+	/*
+	 * Initialize primary planes, implicit planes for legacy IOCTLS.
+	 * Order is reversed to match iteration order in atomic check.
+	 */
+	for (i = (primary_planes - 1); i >= 0; i--) {
+		plane = &dm->dc->caps.planes[i];
+
+		if (initialize_plane(dm, mode_info, i,
+				     DRM_PLANE_TYPE_PRIMARY, plane)) {
+			DRM_ERROR("KMS: Failed to initialize primary plane\n");
+			goto fail;
+		}
+	}
+
+	/*
+	 * Initialize overlay planes, index starting after primary planes.
+	 * These planes have a higher DRM index than the primary planes since
+	 * they should be considered as having a higher z-order.
+	 * Order is reversed to match iteration order in atomic check.
+	 *
+	 * Only support DCN for now, and only expose one so we don't encourage
+	 * userspace to use up all the pipes.
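+	 *
+	 * Only DCN universal planes with ARGB8888 support qualify, the number
+	 * exposed is capped by max_slave_planes, and the DC_DISABLE_MPO bit
+	 * in amdgpu_dc_debug_mask suppresses overlay creation entirely.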
+ */ + for (i = 0; i < dm->dc->caps.max_planes; ++i) { + struct dc_plane_cap *plane = &dm->dc->caps.planes[i]; + + /* Do not create overlay if MPO disabled */ + if (amdgpu_dc_debug_mask & DC_DISABLE_MPO) + break; + + if (plane->type != DC_PLANE_TYPE_DCN_UNIVERSAL) + continue; + + if (!plane->pixel_format_support.argb8888) + continue; + + if (max_overlay-- == 0) + break; + + if (initialize_plane(dm, NULL, primary_planes + i, + DRM_PLANE_TYPE_OVERLAY, plane)) { + DRM_ERROR("KMS: Failed to initialize overlay plane\n"); + goto fail; + } + } + + for (i = 0; i < dm->dc->caps.max_streams; i++) + if (amdgpu_dm_crtc_init(dm, mode_info->planes[i], i)) { + DRM_ERROR("KMS: Failed to initialize crtc\n"); + goto fail; + } + + /* Use Outbox interrupt */ + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 1, 5): + case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): + case IP_VERSION(2, 1, 0): + if (register_outbox_irq_handlers(dm->adev)) { + DRM_ERROR("DM: Failed to initialize IRQ\n"); + goto fail; + } + break; + default: + DRM_DEBUG_KMS("Unsupported DCN IP version for outbox: 0x%X\n", + adev->ip_versions[DCE_HWIP][0]); + } + + /* Determine whether to enable PSR support by default. */ + if (!(amdgpu_dc_debug_mask & DC_DISABLE_PSR)) { + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 1, 5): + case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): + psr_feature_enabled = true; + break; + default: + psr_feature_enabled = amdgpu_dc_feature_mask & DC_PSR_MASK; + break; + } + } + + if (!(amdgpu_dc_debug_mask & DC_DISABLE_REPLAY)) { + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 1, 5): + case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): + replay_feature_enabled = true; + break; + default: + replay_feature_enabled = amdgpu_dc_feature_mask & DC_REPLAY_MASK; + break; + } + } + /* loops over all connectors on the board */ + for (i = 0; i < link_cnt; i++) { + struct dc_link *link = NULL; + + if (i > AMDGPU_DM_MAX_DISPLAY_INDEX) { + DRM_ERROR( + "KMS: Cannot support more than %d display indexes\n", + AMDGPU_DM_MAX_DISPLAY_INDEX); + continue; + } + + aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL); + if (!aconnector) + goto fail; + + aencoder = kzalloc(sizeof(*aencoder), GFP_KERNEL); + if (!aencoder) + goto fail; + + if (amdgpu_dm_encoder_init(dm->ddev, aencoder, i)) { + DRM_ERROR("KMS: Failed to initialize encoder\n"); + goto fail; + } + + if (amdgpu_dm_connector_init(dm, aconnector, i, aencoder)) { + DRM_ERROR("KMS: Failed to initialize connector\n"); + goto fail; + } + + link = dc_get_link_at_index(dm->dc, i); + + if (!dc_link_detect_connection_type(link, &new_connection_type)) + DRM_ERROR("KMS: Failed to detect connector\n"); + + if (aconnector->base.force && new_connection_type == dc_connection_none) { + emulated_link_detect(link); + amdgpu_dm_update_connector_after_detect(aconnector); + } else { + bool ret = false; + + mutex_lock(&dm->dc_lock); + ret = dc_link_detect(link, DETECT_REASON_BOOT); + mutex_unlock(&dm->dc_lock); + + if (ret) { + amdgpu_dm_update_connector_after_detect(aconnector); + setup_backlight_device(dm, aconnector); + + /* + * Disable psr if replay can be enabled + */ + if (replay_feature_enabled && amdgpu_dm_setup_replay(link, 
aconnector)) + psr_feature_enabled = false; + + if (psr_feature_enabled) + amdgpu_dm_set_psr_caps(link); + + /* TODO: Fix vblank control helpers to delay PSR entry to allow this when + * PSR is also supported. + */ + if (link->psr_settings.psr_feature_enabled) + adev_to_drm(adev)->vblank_disable_immediate = false; + } + } + amdgpu_set_panel_orientation(&aconnector->base); + } + + /* Software is initialized. Now we can register interrupt handlers. */ + switch (adev->asic_type) { +#if defined(CONFIG_DRM_AMD_DC_SI) + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + case CHIP_OLAND: + if (dce60_register_irq_handlers(dm->adev)) { + DRM_ERROR("DM: Failed to initialize IRQ\n"); + goto fail; + } + break; +#endif + case CHIP_BONAIRE: + case CHIP_HAWAII: + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: + case CHIP_TONGA: + case CHIP_FIJI: + case CHIP_CARRIZO: + case CHIP_STONEY: + case CHIP_POLARIS11: + case CHIP_POLARIS10: + case CHIP_POLARIS12: + case CHIP_VEGAM: + case CHIP_VEGA10: + case CHIP_VEGA12: + case CHIP_VEGA20: + if (dce110_register_irq_handlers(dm->adev)) { + DRM_ERROR("DM: Failed to initialize IRQ\n"); + goto fail; + } + break; + default: + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): + case IP_VERSION(2, 0, 2): + case IP_VERSION(2, 0, 3): + case IP_VERSION(2, 0, 0): + case IP_VERSION(2, 1, 0): + case IP_VERSION(3, 0, 0): + case IP_VERSION(3, 0, 2): + case IP_VERSION(3, 0, 3): + case IP_VERSION(3, 0, 1): + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 1, 5): + case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): + if (dcn10_register_irq_handlers(dm->adev)) { + DRM_ERROR("DM: Failed to initialize IRQ\n"); + goto fail; + } + break; + default: + DRM_ERROR("Unsupported DCE IP versions: 0x%X\n", + adev->ip_versions[DCE_HWIP][0]); + goto fail; + } + break; + } + + return 0; +fail: + kfree(aencoder); + kfree(aconnector); + + return -EINVAL; +} + +static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm) +{ + drm_atomic_private_obj_fini(&dm->atomic_obj); +} + +/****************************************************************************** + * amdgpu_display_funcs functions + *****************************************************************************/ + +/* + * dm_bandwidth_update - program display watermarks + * + * @adev: amdgpu_device pointer + * + * Calculate and program the display watermarks and line buffer allocation. + */ +static void dm_bandwidth_update(struct amdgpu_device *adev) +{ + /* TODO: implement later */ +} + +static const struct amdgpu_display_funcs dm_display_funcs = { + .bandwidth_update = dm_bandwidth_update, /* called unconditionally */ + .vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */ + .backlight_set_level = NULL, /* never called for DC */ + .backlight_get_level = NULL, /* never called for DC */ + .hpd_sense = NULL,/* called unconditionally */ + .hpd_set_polarity = NULL, /* called unconditionally */ + .hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */ + .page_flip_get_scanoutpos = + dm_crtc_get_scanoutpos,/* called unconditionally */ + .add_encoder = NULL, /* VBIOS parsing. DAL does it. */ + .add_connector = NULL, /* VBIOS parsing. DAL does it. 
*/ +}; + +#if defined(CONFIG_DEBUG_KERNEL_DC) + +static ssize_t s3_debug_store(struct device *device, + struct device_attribute *attr, + const char *buf, + size_t count) +{ + int ret; + int s3_state; + struct drm_device *drm_dev = dev_get_drvdata(device); + struct amdgpu_device *adev = drm_to_adev(drm_dev); + + ret = kstrtoint(buf, 0, &s3_state); + + if (ret == 0) { + if (s3_state) { + dm_resume(adev); + drm_kms_helper_hotplug_event(adev_to_drm(adev)); + } else + dm_suspend(adev); + } + + return ret == 0 ? count : 0; +} + +DEVICE_ATTR_WO(s3_debug); + +#endif + +static int dm_init_microcode(struct amdgpu_device *adev) +{ + char *fw_name_dmub; + int r; + + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(2, 1, 0): + fw_name_dmub = FIRMWARE_RENOIR_DMUB; + if (ASICREV_IS_GREEN_SARDINE(adev->external_rev_id)) + fw_name_dmub = FIRMWARE_GREEN_SARDINE_DMUB; + break; + case IP_VERSION(3, 0, 0): + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0)) + fw_name_dmub = FIRMWARE_SIENNA_CICHLID_DMUB; + else + fw_name_dmub = FIRMWARE_NAVY_FLOUNDER_DMUB; + break; + case IP_VERSION(3, 0, 1): + fw_name_dmub = FIRMWARE_VANGOGH_DMUB; + break; + case IP_VERSION(3, 0, 2): + fw_name_dmub = FIRMWARE_DIMGREY_CAVEFISH_DMUB; + break; + case IP_VERSION(3, 0, 3): + fw_name_dmub = FIRMWARE_BEIGE_GOBY_DMUB; + break; + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + fw_name_dmub = FIRMWARE_YELLOW_CARP_DMUB; + break; + case IP_VERSION(3, 1, 4): + fw_name_dmub = FIRMWARE_DCN_314_DMUB; + break; + case IP_VERSION(3, 1, 5): + fw_name_dmub = FIRMWARE_DCN_315_DMUB; + break; + case IP_VERSION(3, 1, 6): + fw_name_dmub = FIRMWARE_DCN316_DMUB; + break; + case IP_VERSION(3, 2, 0): + fw_name_dmub = FIRMWARE_DCN_V3_2_0_DMCUB; + break; + case IP_VERSION(3, 2, 1): + fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB; + break; + default: + /* ASIC doesn't support DMUB. 
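+		 *
+		 * Returning 0 rather than an error is deliberate: DM can
+		 * initialize fine without DMUB firmware on these ASICs.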
*/ + return 0; + } + r = amdgpu_ucode_request(adev, &adev->dm.dmub_fw, fw_name_dmub); + if (r) + DRM_ERROR("DMUB firmware loading failed: %d\n", r); + return r; +} + +static int dm_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_mode_info *mode_info = &adev->mode_info; + struct atom_context *ctx = mode_info->atom_context; + int index = GetIndexIntoMasterTable(DATA, Object_Header); + u16 data_offset; + + /* if there is no object header, skip DM */ + if (!amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { + adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK; + dev_info(adev->dev, "No object header, skipping DM\n"); + return -ENOENT; + } + + switch (adev->asic_type) { +#if defined(CONFIG_DRM_AMD_DC_SI) + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_VERDE: + adev->mode_info.num_crtc = 6; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 6; + break; + case CHIP_OLAND: + adev->mode_info.num_crtc = 2; + adev->mode_info.num_hpd = 2; + adev->mode_info.num_dig = 2; + break; +#endif + case CHIP_BONAIRE: + case CHIP_HAWAII: + adev->mode_info.num_crtc = 6; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 6; + break; + case CHIP_KAVERI: + adev->mode_info.num_crtc = 4; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 7; + break; + case CHIP_KABINI: + case CHIP_MULLINS: + adev->mode_info.num_crtc = 2; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 6; + break; + case CHIP_FIJI: + case CHIP_TONGA: + adev->mode_info.num_crtc = 6; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 7; + break; + case CHIP_CARRIZO: + adev->mode_info.num_crtc = 3; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 9; + break; + case CHIP_STONEY: + adev->mode_info.num_crtc = 2; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 9; + break; + case CHIP_POLARIS11: + case CHIP_POLARIS12: + adev->mode_info.num_crtc = 5; + adev->mode_info.num_hpd = 5; + adev->mode_info.num_dig = 5; + break; + case CHIP_POLARIS10: + case CHIP_VEGAM: + adev->mode_info.num_crtc = 6; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 6; + break; + case CHIP_VEGA10: + case CHIP_VEGA12: + case CHIP_VEGA20: + adev->mode_info.num_crtc = 6; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 6; + break; + default: + + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(2, 0, 2): + case IP_VERSION(3, 0, 0): + adev->mode_info.num_crtc = 6; + adev->mode_info.num_hpd = 6; + adev->mode_info.num_dig = 6; + break; + case IP_VERSION(2, 0, 0): + case IP_VERSION(3, 0, 2): + adev->mode_info.num_crtc = 5; + adev->mode_info.num_hpd = 5; + adev->mode_info.num_dig = 5; + break; + case IP_VERSION(2, 0, 3): + case IP_VERSION(3, 0, 3): + adev->mode_info.num_crtc = 2; + adev->mode_info.num_hpd = 2; + adev->mode_info.num_dig = 2; + break; + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): + case IP_VERSION(3, 0, 1): + case IP_VERSION(2, 1, 0): + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + case IP_VERSION(3, 1, 4): + case IP_VERSION(3, 1, 5): + case IP_VERSION(3, 1, 6): + case IP_VERSION(3, 2, 0): + case IP_VERSION(3, 2, 1): + adev->mode_info.num_crtc = 4; + adev->mode_info.num_hpd = 4; + adev->mode_info.num_dig = 4; + break; + default: + DRM_ERROR("Unsupported DCE IP versions: 0x%x\n", + adev->ip_versions[DCE_HWIP][0]); + return -EINVAL; + } + break; + } + + if (adev->mode_info.funcs == NULL) + adev->mode_info.funcs = &dm_display_funcs; + + /* + * Note: Do NOT change 
adev->audio_endpt_rreg and + * adev->audio_endpt_wreg because they are initialised in + * amdgpu_device_init() + */ +#if defined(CONFIG_DEBUG_KERNEL_DC) + device_create_file( + adev_to_drm(adev)->dev, + &dev_attr_s3_debug); +#endif + adev->dc_enabled = true; + + return dm_init_microcode(adev); +} + +static bool modereset_required(struct drm_crtc_state *crtc_state) +{ + return !crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); +} + +static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder) +{ + drm_encoder_cleanup(encoder); + kfree(encoder); +} + +static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = { + .destroy = amdgpu_dm_encoder_destroy, +}; + +static int +fill_plane_color_attributes(const struct drm_plane_state *plane_state, + const enum surface_pixel_format format, + enum dc_color_space *color_space) +{ + bool full_range; + + *color_space = COLOR_SPACE_SRGB; + + /* DRM color properties only affect non-RGB formats. */ + if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) + return 0; + + full_range = (plane_state->color_range == DRM_COLOR_YCBCR_FULL_RANGE); + + switch (plane_state->color_encoding) { + case DRM_COLOR_YCBCR_BT601: + if (full_range) + *color_space = COLOR_SPACE_YCBCR601; + else + *color_space = COLOR_SPACE_YCBCR601_LIMITED; + break; + + case DRM_COLOR_YCBCR_BT709: + if (full_range) + *color_space = COLOR_SPACE_YCBCR709; + else + *color_space = COLOR_SPACE_YCBCR709_LIMITED; + break; + + case DRM_COLOR_YCBCR_BT2020: + if (full_range) + *color_space = COLOR_SPACE_2020_YCBCR; + else + return -EINVAL; + break; + + default: + return -EINVAL; + } + + return 0; +} + +static int +fill_dc_plane_info_and_addr(struct amdgpu_device *adev, + const struct drm_plane_state *plane_state, + const u64 tiling_flags, + struct dc_plane_info *plane_info, + struct dc_plane_address *address, + bool tmz_surface, + bool force_disable_dcc) +{ + const struct drm_framebuffer *fb = plane_state->fb; + const struct amdgpu_framebuffer *afb = + to_amdgpu_framebuffer(plane_state->fb); + int ret; + + memset(plane_info, 0, sizeof(*plane_info)); + + switch (fb->format->format) { + case DRM_FORMAT_C8: + plane_info->format = + SURFACE_PIXEL_FORMAT_GRPH_PALETA_256_COLORS; + break; + case DRM_FORMAT_RGB565: + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_RGB565; + break; + case DRM_FORMAT_XRGB8888: + case DRM_FORMAT_ARGB8888: + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; + break; + case DRM_FORMAT_XRGB2101010: + case DRM_FORMAT_ARGB2101010: + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010; + break; + case DRM_FORMAT_XBGR2101010: + case DRM_FORMAT_ABGR2101010: + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010; + break; + case DRM_FORMAT_XBGR8888: + case DRM_FORMAT_ABGR8888: + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR8888; + break; + case DRM_FORMAT_NV21: + plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr; + break; + case DRM_FORMAT_NV12: + plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb; + break; + case DRM_FORMAT_P010: + plane_info->format = SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb; + break; + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F; + break; + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F; + break; + case DRM_FORMAT_XRGB16161616: + case DRM_FORMAT_ARGB16161616: + plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616; + break; + case 
DRM_FORMAT_XBGR16161616:
+	case DRM_FORMAT_ABGR16161616:
+		plane_info->format = SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616;
+		break;
+	default:
+		DRM_ERROR(
+			"Unsupported screen format %p4cc\n",
+			&fb->format->format);
+		return -EINVAL;
+	}
+
+	switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) {
+	case DRM_MODE_ROTATE_0:
+		plane_info->rotation = ROTATION_ANGLE_0;
+		break;
+	case DRM_MODE_ROTATE_90:
+		plane_info->rotation = ROTATION_ANGLE_90;
+		break;
+	case DRM_MODE_ROTATE_180:
+		plane_info->rotation = ROTATION_ANGLE_180;
+		break;
+	case DRM_MODE_ROTATE_270:
+		plane_info->rotation = ROTATION_ANGLE_270;
+		break;
+	default:
+		plane_info->rotation = ROTATION_ANGLE_0;
+		break;
+	}
+
+	plane_info->visible = true;
+	plane_info->stereo_format = PLANE_STEREO_FORMAT_NONE;
+
+	plane_info->layer_index = plane_state->normalized_zpos;
+
+	ret = fill_plane_color_attributes(plane_state, plane_info->format,
+					  &plane_info->color_space);
+	if (ret)
+		return ret;
+
+	ret = amdgpu_dm_plane_fill_plane_buffer_attributes(adev, afb, plane_info->format,
+					   plane_info->rotation, tiling_flags,
+					   &plane_info->tiling_info,
+					   &plane_info->plane_size,
+					   &plane_info->dcc, address,
+					   tmz_surface, force_disable_dcc);
+	if (ret)
+		return ret;
+
+	amdgpu_dm_plane_fill_blending_from_plane_state(
+		plane_state, &plane_info->per_pixel_alpha, &plane_info->pre_multiplied_alpha,
+		&plane_info->global_alpha, &plane_info->global_alpha_value);
+
+	return 0;
+}
+
+static int fill_dc_plane_attributes(struct amdgpu_device *adev,
+				    struct dc_plane_state *dc_plane_state,
+				    struct drm_plane_state *plane_state,
+				    struct drm_crtc_state *crtc_state)
+{
+	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+	struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)plane_state->fb;
+	struct dc_scaling_info scaling_info;
+	struct dc_plane_info plane_info;
+	int ret;
+	bool force_disable_dcc = false;
+
+	ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, plane_state, &scaling_info);
+	if (ret)
+		return ret;
+
+	dc_plane_state->src_rect = scaling_info.src_rect;
+	dc_plane_state->dst_rect = scaling_info.dst_rect;
+	dc_plane_state->clip_rect = scaling_info.clip_rect;
+	dc_plane_state->scaling_quality = scaling_info.scaling_quality;
+
+	force_disable_dcc = adev->asic_type == CHIP_RAVEN && adev->in_suspend;
+	ret = fill_dc_plane_info_and_addr(adev, plane_state,
+					  afb->tiling_flags,
+					  &plane_info,
+					  &dc_plane_state->address,
+					  afb->tmz_surface,
+					  force_disable_dcc);
+	if (ret)
+		return ret;
+
+	dc_plane_state->format = plane_info.format;
+	dc_plane_state->color_space = plane_info.color_space;
+	dc_plane_state->plane_size = plane_info.plane_size;
+	dc_plane_state->rotation = plane_info.rotation;
+	dc_plane_state->horizontal_mirror = plane_info.horizontal_mirror;
+	dc_plane_state->stereo_format = plane_info.stereo_format;
+	dc_plane_state->tiling_info = plane_info.tiling_info;
+	dc_plane_state->visible = plane_info.visible;
+	dc_plane_state->per_pixel_alpha = plane_info.per_pixel_alpha;
+	dc_plane_state->pre_multiplied_alpha = plane_info.pre_multiplied_alpha;
+	dc_plane_state->global_alpha = plane_info.global_alpha;
+	dc_plane_state->global_alpha_value = plane_info.global_alpha_value;
+	dc_plane_state->dcc = plane_info.dcc;
+	dc_plane_state->layer_index = plane_info.layer_index;
+	dc_plane_state->flip_int_enabled = true;
+
+	/*
+	 * Always set input transfer function, since plane state is refreshed
+	 * every time.
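+	 *
+	 * The input (degamma) transfer function is derived from the CRTC
+	 * color management state, hence the dm_crtc_state argument below.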
+	 */
+	ret = amdgpu_dm_update_plane_color_mgmt(dm_crtc_state, dc_plane_state);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static inline void fill_dc_dirty_rect(struct drm_plane *plane,
+				      struct rect *dirty_rect, s32 x,
+				      s32 y, s32 width, s32 height,
+				      int *i, bool ffu)
+{
+	WARN_ON(*i >= DC_MAX_DIRTY_RECTS);
+
+	dirty_rect->x = x;
+	dirty_rect->y = y;
+	dirty_rect->width = width;
+	dirty_rect->height = height;
+
+	if (ffu)
+		drm_dbg(plane->dev,
+			"[PLANE:%d] PSR FFU dirty rect size (%d, %d)\n",
+			plane->base.id, width, height);
+	else
+		drm_dbg(plane->dev,
+			"[PLANE:%d] PSR SU dirty rect at (%d, %d) size (%d, %d)",
+			plane->base.id, x, y, width, height);
+
+	(*i)++;
+}
+
+/**
+ * fill_dc_dirty_rects() - Fill DC dirty regions for PSR selective updates
+ *
+ * @plane: DRM plane containing dirty regions that need to be flushed to the
+ *         eDP remote fb
+ * @old_plane_state: Old state of @plane
+ * @new_plane_state: New state of @plane
+ * @crtc_state: New state of CRTC connected to the @plane
+ * @flip_addrs: DC flip tracking struct, which also tracks dirty rects
+ * @dirty_regions_changed: set when the tracked dirty regions change
+ *
+ * For PSR SU, DC informs the DMUB uController of dirty rectangle regions
+ * (referred to as "damage clips" in DRM nomenclature) that require updating on
+ * the eDP remote buffer. The responsibility of specifying the dirty regions is
+ * amdgpu_dm's.
+ *
+ * A damage-aware DRM client should fill the FB_DAMAGE_CLIPS property on the
+ * plane with regions that require flushing to the eDP remote buffer. In
+ * addition, certain use cases - such as cursor and multi-plane overlay (MPO) -
+ * implicitly provide damage clips without any client support via the plane
+ * bounds.
+ */
+static void fill_dc_dirty_rects(struct drm_plane *plane,
+				struct drm_plane_state *old_plane_state,
+				struct drm_plane_state *new_plane_state,
+				struct drm_crtc_state *crtc_state,
+				struct dc_flip_addrs *flip_addrs,
+				bool *dirty_regions_changed)
+{
+	struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state);
+	struct rect *dirty_rects = flip_addrs->dirty_rects;
+	u32 num_clips;
+	struct drm_mode_rect *clips;
+	bool bb_changed;
+	bool fb_changed;
+	u32 i = 0;
+
+	*dirty_regions_changed = false;
+
+	/*
+	 * Cursor plane has its own dirty rect update interface. See
+	 * dcn10_dmub_update_cursor_data and dmub_cmd_update_cursor_info_data
+	 */
+	if (plane->type == DRM_PLANE_TYPE_CURSOR)
+		return;
+
+	if (new_plane_state->rotation != DRM_MODE_ROTATE_0)
+		goto ffu;
+
+	num_clips = drm_plane_get_damage_clips_count(new_plane_state);
+	clips = drm_plane_get_damage_clips(new_plane_state);
+
+	if (!dm_crtc_state->mpo_requested) {
+		if (!num_clips || num_clips > DC_MAX_DIRTY_RECTS)
+			goto ffu;
+
+		for (; flip_addrs->dirty_rect_count < num_clips; clips++)
+			fill_dc_dirty_rect(new_plane_state->plane,
+					   &dirty_rects[flip_addrs->dirty_rect_count],
+					   clips->x1, clips->y1,
+					   clips->x2 - clips->x1, clips->y2 - clips->y1,
+					   &flip_addrs->dirty_rect_count,
+					   false);
+		return;
+	}
+
+	/*
+	 * MPO is requested. Add entire plane bounding box to dirty rects if
+	 * flipped to or damaged.
+	 *
+	 * If plane is moved or resized, also add old bounding box to dirty
+	 * rects.
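+	 *
+	 * This is why the DC_MAX_DIRTY_RECTS check below reserves headroom
+	 * for two extra rects (the new and the old plane bounds) on top of
+	 * the client-supplied damage clips.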
+ */ + fb_changed = old_plane_state->fb->base.id != + new_plane_state->fb->base.id; + bb_changed = (old_plane_state->crtc_x != new_plane_state->crtc_x || + old_plane_state->crtc_y != new_plane_state->crtc_y || + old_plane_state->crtc_w != new_plane_state->crtc_w || + old_plane_state->crtc_h != new_plane_state->crtc_h); + + drm_dbg(plane->dev, + "[PLANE:%d] PSR bb_changed:%d fb_changed:%d num_clips:%d\n", + new_plane_state->plane->base.id, + bb_changed, fb_changed, num_clips); + + *dirty_regions_changed = bb_changed; + + if ((num_clips + (bb_changed ? 2 : 0)) > DC_MAX_DIRTY_RECTS) + goto ffu; + + if (bb_changed) { + fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], + new_plane_state->crtc_x, + new_plane_state->crtc_y, + new_plane_state->crtc_w, + new_plane_state->crtc_h, &i, false); + + /* Add old plane bounding-box if plane is moved or resized */ + fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], + old_plane_state->crtc_x, + old_plane_state->crtc_y, + old_plane_state->crtc_w, + old_plane_state->crtc_h, &i, false); + } + + if (num_clips) { + for (; i < num_clips; clips++) + fill_dc_dirty_rect(new_plane_state->plane, + &dirty_rects[i], clips->x1, + clips->y1, clips->x2 - clips->x1, + clips->y2 - clips->y1, &i, false); + } else if (fb_changed && !bb_changed) { + fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[i], + new_plane_state->crtc_x, + new_plane_state->crtc_y, + new_plane_state->crtc_w, + new_plane_state->crtc_h, &i, false); + } + + flip_addrs->dirty_rect_count = i; + return; + +ffu: + fill_dc_dirty_rect(new_plane_state->plane, &dirty_rects[0], 0, 0, + dm_crtc_state->base.mode.crtc_hdisplay, + dm_crtc_state->base.mode.crtc_vdisplay, + &flip_addrs->dirty_rect_count, true); +} + +static void update_stream_scaling_settings(const struct drm_display_mode *mode, + const struct dm_connector_state *dm_state, + struct dc_stream_state *stream) +{ + enum amdgpu_rmx_type rmx_type; + + struct rect src = { 0 }; /* viewport in composition space*/ + struct rect dst = { 0 }; /* stream addressable area */ + + /* no mode. 
nothing to be done */
+	if (!mode)
+		return;
+
+	/* Full screen scaling by default */
+	src.width = mode->hdisplay;
+	src.height = mode->vdisplay;
+	dst.width = stream->timing.h_addressable;
+	dst.height = stream->timing.v_addressable;
+
+	if (dm_state) {
+		rmx_type = dm_state->scaling;
+		if (rmx_type == RMX_ASPECT || rmx_type == RMX_OFF) {
+			if (src.width * dst.height <
+					src.height * dst.width) {
+				/* height needs less upscaling/more downscaling */
+				dst.width = src.width *
+						dst.height / src.height;
+			} else {
+				/* width needs less upscaling/more downscaling */
+				dst.height = src.height *
+						dst.width / src.width;
+			}
+		} else if (rmx_type == RMX_CENTER) {
+			dst = src;
+		}
+
+		dst.x = (stream->timing.h_addressable - dst.width) / 2;
+		dst.y = (stream->timing.v_addressable - dst.height) / 2;
+
+		if (dm_state->underscan_enable) {
+			dst.x += dm_state->underscan_hborder / 2;
+			dst.y += dm_state->underscan_vborder / 2;
+			dst.width -= dm_state->underscan_hborder;
+			dst.height -= dm_state->underscan_vborder;
+		}
+	}
+
+	stream->src = src;
+	stream->dst = dst;
+
+	DRM_DEBUG_KMS("Destination Rectangle x:%d y:%d width:%d height:%d\n",
+		      dst.x, dst.y, dst.width, dst.height);
+
+}
+
+static enum dc_color_depth
+convert_color_depth_from_display_info(const struct drm_connector *connector,
+				      bool is_y420, int requested_bpc)
+{
+	u8 bpc;
+
+	if (is_y420) {
+		bpc = 8;
+
+		/* Cap display bpc based on HDMI 2.0 HF-VSDB */
+		if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48)
+			bpc = 16;
+		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36)
+			bpc = 12;
+		else if (connector->display_info.hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30)
+			bpc = 10;
+	} else {
+		bpc = (uint8_t)connector->display_info.bpc;
+		/* Assume 8 bpc by default if no bpc is specified. */
+		bpc = bpc ? bpc : 8;
+	}
+
+	if (requested_bpc > 0) {
+		/*
+		 * Cap display bpc based on the user requested value.
+		 *
+		 * The value for state->max_bpc may not be correctly updated
+		 * depending on when the connector gets added to the state
+		 * or if this was called outside of atomic check, so it
+		 * can't be used directly.
+		 */
+		bpc = min_t(u8, bpc, requested_bpc);
+
+		/* Round down to the nearest even number. */
+		bpc = bpc - (bpc & 1);
+	}
+
+	switch (bpc) {
+	case 0:
+		/*
+		 * Temporary workaround, DRM doesn't parse color depth for
+		 * EDID revision before 1.4
+		 * TODO: Fix edid parsing
+		 */
+		return COLOR_DEPTH_888;
+	case 6:
+		return COLOR_DEPTH_666;
+	case 8:
+		return COLOR_DEPTH_888;
+	case 10:
+		return COLOR_DEPTH_101010;
+	case 12:
+		return COLOR_DEPTH_121212;
+	case 14:
+		return COLOR_DEPTH_141414;
+	case 16:
+		return COLOR_DEPTH_161616;
+	default:
+		return COLOR_DEPTH_UNDEFINED;
+	}
+}
+
+static enum dc_aspect_ratio
+get_aspect_ratio(const struct drm_display_mode *mode_in)
+{
+	/* 1-1 mapping, since both enums follow the HDMI spec.
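+	 * Both enumerations list the ratios in the same order (none, 4:3,
+	 * 16:9, 64:27, 256:135), which is what makes the direct cast safe.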
*/ + return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio; +} + +static enum dc_color_space +get_output_color_space(const struct dc_crtc_timing *dc_crtc_timing, + const struct drm_connector_state *connector_state) +{ + enum dc_color_space color_space = COLOR_SPACE_SRGB; + + switch (connector_state->colorspace) { + case DRM_MODE_COLORIMETRY_BT601_YCC: + if (dc_crtc_timing->flags.Y_ONLY) + color_space = COLOR_SPACE_YCBCR601_LIMITED; + else + color_space = COLOR_SPACE_YCBCR601; + break; + case DRM_MODE_COLORIMETRY_BT709_YCC: + if (dc_crtc_timing->flags.Y_ONLY) + color_space = COLOR_SPACE_YCBCR709_LIMITED; + else + color_space = COLOR_SPACE_YCBCR709; + break; + case DRM_MODE_COLORIMETRY_OPRGB: + color_space = COLOR_SPACE_ADOBERGB; + break; + case DRM_MODE_COLORIMETRY_BT2020_RGB: + case DRM_MODE_COLORIMETRY_BT2020_YCC: + if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) + color_space = COLOR_SPACE_2020_RGB_FULLRANGE; + else + color_space = COLOR_SPACE_2020_YCBCR; + break; + case DRM_MODE_COLORIMETRY_DEFAULT: // ITU601 + default: + if (dc_crtc_timing->pixel_encoding == PIXEL_ENCODING_RGB) { + color_space = COLOR_SPACE_SRGB; + /* + * 27030khz is the separation point between HDTV and SDTV + * according to HDMI spec, we use YCbCr709 and YCbCr601 + * respectively + */ + } else if (dc_crtc_timing->pix_clk_100hz > 270300) { + if (dc_crtc_timing->flags.Y_ONLY) + color_space = + COLOR_SPACE_YCBCR709_LIMITED; + else + color_space = COLOR_SPACE_YCBCR709; + } else { + if (dc_crtc_timing->flags.Y_ONLY) + color_space = + COLOR_SPACE_YCBCR601_LIMITED; + else + color_space = COLOR_SPACE_YCBCR601; + } + break; + } + + return color_space; +} + +static bool adjust_colour_depth_from_display_info( + struct dc_crtc_timing *timing_out, + const struct drm_display_info *info) +{ + enum dc_color_depth depth = timing_out->display_color_depth; + int normalized_clk; + + do { + normalized_clk = timing_out->pix_clk_100hz / 10; + /* YCbCr 4:2:0 requires additional adjustment of 1/2 */ + if (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420) + normalized_clk /= 2; + /* Adjusting pix clock following on HDMI spec based on colour depth */ + switch (depth) { + case COLOR_DEPTH_888: + break; + case COLOR_DEPTH_101010: + normalized_clk = (normalized_clk * 30) / 24; + break; + case COLOR_DEPTH_121212: + normalized_clk = (normalized_clk * 36) / 24; + break; + case COLOR_DEPTH_161616: + normalized_clk = (normalized_clk * 48) / 24; + break; + default: + /* The above depths are the only ones valid for HDMI. 
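+		 *
+		 * For example, a 12 bpc stream scales the normalized clock by
+		 * 36/24 (1.5x); if that exceeds max_tmds_clock, the loop
+		 * retries at 10 bpc (30/24) and finally at 8 bpc (1x).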
*/ + return false; + } + if (normalized_clk <= info->max_tmds_clock) { + timing_out->display_color_depth = depth; + return true; + } + } while (--depth > COLOR_DEPTH_666); + return false; +} + +static void fill_stream_properties_from_drm_display_mode( + struct dc_stream_state *stream, + const struct drm_display_mode *mode_in, + const struct drm_connector *connector, + const struct drm_connector_state *connector_state, + const struct dc_stream_state *old_stream, + int requested_bpc) +{ + struct dc_crtc_timing *timing_out = &stream->timing; + const struct drm_display_info *info = &connector->display_info; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct hdmi_vendor_infoframe hv_frame; + struct hdmi_avi_infoframe avi_frame; + + memset(&hv_frame, 0, sizeof(hv_frame)); + memset(&avi_frame, 0, sizeof(avi_frame)); + + timing_out->h_border_left = 0; + timing_out->h_border_right = 0; + timing_out->v_border_top = 0; + timing_out->v_border_bottom = 0; + /* TODO: un-hardcode */ + if (drm_mode_is_420_only(info, mode_in) + && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + else if (drm_mode_is_420_also(info, mode_in) + && aconnector->force_yuv420_output) + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + else if ((connector->display_info.color_formats & DRM_COLOR_FORMAT_YCBCR444) + && stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR444; + else + timing_out->pixel_encoding = PIXEL_ENCODING_RGB; + + timing_out->timing_3d_format = TIMING_3D_FORMAT_NONE; + timing_out->display_color_depth = convert_color_depth_from_display_info( + connector, + (timing_out->pixel_encoding == PIXEL_ENCODING_YCBCR420), + requested_bpc); + timing_out->scan_type = SCANNING_TYPE_NODATA; + timing_out->hdmi_vic = 0; + + if (old_stream) { + timing_out->vic = old_stream->timing.vic; + timing_out->flags.HSYNC_POSITIVE_POLARITY = old_stream->timing.flags.HSYNC_POSITIVE_POLARITY; + timing_out->flags.VSYNC_POSITIVE_POLARITY = old_stream->timing.flags.VSYNC_POSITIVE_POLARITY; + } else { + timing_out->vic = drm_match_cea_mode(mode_in); + if (mode_in->flags & DRM_MODE_FLAG_PHSYNC) + timing_out->flags.HSYNC_POSITIVE_POLARITY = 1; + if (mode_in->flags & DRM_MODE_FLAG_PVSYNC) + timing_out->flags.VSYNC_POSITIVE_POLARITY = 1; + } + + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { + drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, (struct drm_connector *)connector, mode_in); + timing_out->vic = avi_frame.video_code; + drm_hdmi_vendor_infoframe_from_display_mode(&hv_frame, (struct drm_connector *)connector, mode_in); + timing_out->hdmi_vic = hv_frame.vic; + } + + if (is_freesync_video_mode(mode_in, aconnector)) { + timing_out->h_addressable = mode_in->hdisplay; + timing_out->h_total = mode_in->htotal; + timing_out->h_sync_width = mode_in->hsync_end - mode_in->hsync_start; + timing_out->h_front_porch = mode_in->hsync_start - mode_in->hdisplay; + timing_out->v_total = mode_in->vtotal; + timing_out->v_addressable = mode_in->vdisplay; + timing_out->v_front_porch = mode_in->vsync_start - mode_in->vdisplay; + timing_out->v_sync_width = mode_in->vsync_end - mode_in->vsync_start; + timing_out->pix_clk_100hz = mode_in->clock * 10; + } else { + timing_out->h_addressable = mode_in->crtc_hdisplay; + timing_out->h_total = mode_in->crtc_htotal; + timing_out->h_sync_width = mode_in->crtc_hsync_end - mode_in->crtc_hsync_start; + timing_out->h_front_porch = mode_in->crtc_hsync_start - mode_in->crtc_hdisplay; + 
timing_out->v_total = mode_in->crtc_vtotal; + timing_out->v_addressable = mode_in->crtc_vdisplay; + timing_out->v_front_porch = mode_in->crtc_vsync_start - mode_in->crtc_vdisplay; + timing_out->v_sync_width = mode_in->crtc_vsync_end - mode_in->crtc_vsync_start; + timing_out->pix_clk_100hz = mode_in->crtc_clock * 10; + } + + timing_out->aspect_ratio = get_aspect_ratio(mode_in); + + stream->out_transfer_func->type = TF_TYPE_PREDEFINED; + stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB; + if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A) { + if (!adjust_colour_depth_from_display_info(timing_out, info) && + drm_mode_is_420_also(info, mode_in) && + timing_out->pixel_encoding != PIXEL_ENCODING_YCBCR420) { + timing_out->pixel_encoding = PIXEL_ENCODING_YCBCR420; + adjust_colour_depth_from_display_info(timing_out, info); + } + } + + stream->output_color_space = get_output_color_space(timing_out, connector_state); +} + +static void fill_audio_info(struct audio_info *audio_info, + const struct drm_connector *drm_connector, + const struct dc_sink *dc_sink) +{ + int i = 0; + int cea_revision = 0; + const struct dc_edid_caps *edid_caps = &dc_sink->edid_caps; + + audio_info->manufacture_id = edid_caps->manufacturer_id; + audio_info->product_id = edid_caps->product_id; + + cea_revision = drm_connector->display_info.cea_rev; + + strscpy(audio_info->display_name, + edid_caps->display_name, + AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS); + + if (cea_revision >= 3) { + audio_info->mode_count = edid_caps->audio_mode_count; + + for (i = 0; i < audio_info->mode_count; ++i) { + audio_info->modes[i].format_code = + (enum audio_format_code) + (edid_caps->audio_modes[i].format_code); + audio_info->modes[i].channel_count = + edid_caps->audio_modes[i].channel_count; + audio_info->modes[i].sample_rates.all = + edid_caps->audio_modes[i].sample_rate; + audio_info->modes[i].sample_size = + edid_caps->audio_modes[i].sample_size; + } + } + + audio_info->flags.all = edid_caps->speaker_flags; + + /* TODO: We only check for the progressive mode, check for interlace mode too */ + if (drm_connector->latency_present[0]) { + audio_info->video_latency = drm_connector->video_latency[0]; + audio_info->audio_latency = drm_connector->audio_latency[0]; + } + + /* TODO: For DP, video and audio latency should be calculated from DPCD caps */ + +} + +static void +copy_crtc_timing_for_drm_display_mode(const struct drm_display_mode *src_mode, + struct drm_display_mode *dst_mode) +{ + dst_mode->crtc_hdisplay = src_mode->crtc_hdisplay; + dst_mode->crtc_vdisplay = src_mode->crtc_vdisplay; + dst_mode->crtc_clock = src_mode->crtc_clock; + dst_mode->crtc_hblank_start = src_mode->crtc_hblank_start; + dst_mode->crtc_hblank_end = src_mode->crtc_hblank_end; + dst_mode->crtc_hsync_start = src_mode->crtc_hsync_start; + dst_mode->crtc_hsync_end = src_mode->crtc_hsync_end; + dst_mode->crtc_htotal = src_mode->crtc_htotal; + dst_mode->crtc_hskew = src_mode->crtc_hskew; + dst_mode->crtc_vblank_start = src_mode->crtc_vblank_start; + dst_mode->crtc_vblank_end = src_mode->crtc_vblank_end; + dst_mode->crtc_vsync_start = src_mode->crtc_vsync_start; + dst_mode->crtc_vsync_end = src_mode->crtc_vsync_end; + dst_mode->crtc_vtotal = src_mode->crtc_vtotal; +} + +static void +decide_crtc_timing_for_drm_display_mode(struct drm_display_mode *drm_mode, + const struct drm_display_mode *native_mode, + bool scale_enabled) +{ + if (scale_enabled) { + copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode); + } else if (native_mode->clock == drm_mode->clock && + 
native_mode->htotal == drm_mode->htotal &&
+		   native_mode->vtotal == drm_mode->vtotal) {
+		copy_crtc_timing_for_drm_display_mode(native_mode, drm_mode);
+	} else {
+		/* no scaling nor amdgpu inserted, no need to patch */
+	}
+}
+
+static struct dc_sink *
+create_fake_sink(struct amdgpu_dm_connector *aconnector)
+{
+	struct dc_sink_init_data sink_init_data = { 0 };
+	struct dc_sink *sink = NULL;
+
+	sink_init_data.link = aconnector->dc_link;
+	sink_init_data.sink_signal = aconnector->dc_link->connector_signal;
+
+	sink = dc_sink_create(&sink_init_data);
+	if (!sink) {
+		DRM_ERROR("Failed to create sink!\n");
+		return NULL;
+	}
+	sink->sink_signal = SIGNAL_TYPE_VIRTUAL;
+
+	return sink;
+}
+
+static void set_multisync_trigger_params(
+		struct dc_stream_state *stream)
+{
+	struct dc_stream_state *master = NULL;
+
+	if (stream->triggered_crtc_reset.enabled) {
+		master = stream->triggered_crtc_reset.event_source;
+		stream->triggered_crtc_reset.event =
+			master->timing.flags.VSYNC_POSITIVE_POLARITY ?
+			CRTC_EVENT_VSYNC_RISING : CRTC_EVENT_VSYNC_FALLING;
+		stream->triggered_crtc_reset.delay = TRIGGER_DELAY_NEXT_PIXEL;
+	}
+}
+
+static void set_master_stream(struct dc_stream_state *stream_set[],
+			      int stream_count)
+{
+	int j, highest_rfr = 0, master_stream = 0;
+
+	for (j = 0; j < stream_count; j++) {
+		if (stream_set[j] && stream_set[j]->triggered_crtc_reset.enabled) {
+			int refresh_rate = 0;
+
+			refresh_rate = (stream_set[j]->timing.pix_clk_100hz*100)/
+				(stream_set[j]->timing.h_total*stream_set[j]->timing.v_total);
+			if (refresh_rate > highest_rfr) {
+				highest_rfr = refresh_rate;
+				master_stream = j;
+			}
+		}
+	}
+	for (j = 0; j < stream_count; j++) {
+		if (stream_set[j])
+			stream_set[j]->triggered_crtc_reset.event_source = stream_set[master_stream];
+	}
+}
+
+static void dm_enable_per_frame_crtc_master_sync(struct dc_state *context)
+{
+	int i = 0;
+	struct dc_stream_state *stream;
+
+	if (context->stream_count < 2)
+		return;
+	for (i = 0; i < context->stream_count ; i++) {
+		if (!context->streams[i])
+			continue;
+		/*
+		 * TODO: add a function to read AMD VSDB bits and set
+		 * crtc_sync_master.multi_sync_enabled flag
+		 * For now it's set to false
+		 */
+	}
+
+	set_master_stream(context->streams, context->stream_count);
+
+	for (i = 0; i < context->stream_count ; i++) {
+		stream = context->streams[i];
+
+		if (!stream)
+			continue;
+
+		set_multisync_trigger_params(stream);
+	}
+}
+
+/**
+ * DOC: FreeSync Video
+ *
+ * When a userspace application wants to play a video, the content follows a
+ * standard format definition that usually specifies the FPS for that format.
+ * The below list illustrates some video formats and their expected FPS,
+ * respectively:
+ *
+ * - TV/NTSC (23.976 FPS)
+ * - Cinema (24 FPS)
+ * - TV/PAL (25 FPS)
+ * - TV/NTSC (29.97 FPS)
+ * - TV/NTSC (30 FPS)
+ * - Cinema HFR (48 FPS)
+ * - TV/PAL (50 FPS)
+ * - Commonly used (60 FPS)
+ * - Multiples of 24 (48,72,96 FPS)
+ *
+ * The list of standard video formats is not huge and can be added to the
+ * connector modeset list beforehand. With that, userspace can leverage
+ * FreeSync to extend the front porch in order to attain the target refresh
+ * rate. Such a switch will happen seamlessly, without screen blanking or
+ * reprogramming of the output in any other way. If the userspace requests a
+ * modesetting change compatible with FreeSync modes that only differ in the
+ * refresh rate, DC will skip the full update and avoid blink during the
+ * transition.
For example, the video player can change the modesetting from + * 60Hz to 30Hz for playing TV/NTSC content when it goes full screen without + * causing any display blink. This same concept can be applied to a mode + * setting change. + */ +static struct drm_display_mode * +get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector, + bool use_probed_modes) +{ + struct drm_display_mode *m, *m_pref = NULL; + u16 current_refresh, highest_refresh; + struct list_head *list_head = use_probed_modes ? + &aconnector->base.probed_modes : + &aconnector->base.modes; + + if (aconnector->freesync_vid_base.clock != 0) + return &aconnector->freesync_vid_base; + + /* Find the preferred mode */ + list_for_each_entry(m, list_head, head) { + if (m->type & DRM_MODE_TYPE_PREFERRED) { + m_pref = m; + break; + } + } + + if (!m_pref) { + /* Probably an EDID with no preferred mode. Fallback to first entry */ + m_pref = list_first_entry_or_null( + &aconnector->base.modes, struct drm_display_mode, head); + if (!m_pref) { + DRM_DEBUG_DRIVER("No preferred mode found in EDID\n"); + return NULL; + } + } + + highest_refresh = drm_mode_vrefresh(m_pref); + + /* + * Find the mode with highest refresh rate with same resolution. + * For some monitors, preferred mode is not the mode with highest + * supported refresh rate. + */ + list_for_each_entry(m, list_head, head) { + current_refresh = drm_mode_vrefresh(m); + + if (m->hdisplay == m_pref->hdisplay && + m->vdisplay == m_pref->vdisplay && + highest_refresh < current_refresh) { + highest_refresh = current_refresh; + m_pref = m; + } + } + + drm_mode_copy(&aconnector->freesync_vid_base, m_pref); + return m_pref; +} + +static bool is_freesync_video_mode(const struct drm_display_mode *mode, + struct amdgpu_dm_connector *aconnector) +{ + struct drm_display_mode *high_mode; + int timing_diff; + + high_mode = get_highest_refresh_rate_mode(aconnector, false); + if (!high_mode || !mode) + return false; + + timing_diff = high_mode->vtotal - mode->vtotal; + + if (high_mode->clock == 0 || high_mode->clock != mode->clock || + high_mode->hdisplay != mode->hdisplay || + high_mode->vdisplay != mode->vdisplay || + high_mode->hsync_start != mode->hsync_start || + high_mode->hsync_end != mode->hsync_end || + high_mode->htotal != mode->htotal || + high_mode->hskew != mode->hskew || + high_mode->vscan != mode->vscan || + high_mode->vsync_start - mode->vsync_start != timing_diff || + high_mode->vsync_end - mode->vsync_end != timing_diff) + return false; + else + return true; +} + +static void update_dsc_caps(struct amdgpu_dm_connector *aconnector, + struct dc_sink *sink, struct dc_stream_state *stream, + struct dsc_dec_dpcd_caps *dsc_caps) +{ + stream->timing.flags.DSC = 0; + dsc_caps->is_dsc_supported = false; + + if (aconnector->dc_link && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT || + sink->sink_signal == SIGNAL_TYPE_EDP)) { + if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE || + sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) + dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc, + aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, + aconnector->dc_link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, + dsc_caps); + } +} + + +static void apply_dsc_policy_for_edp(struct amdgpu_dm_connector *aconnector, + struct dc_sink *sink, struct dc_stream_state *stream, + struct dsc_dec_dpcd_caps *dsc_caps, + uint32_t max_dsc_target_bpp_limit_override) +{ + const struct dc_link_settings *verified_link_cap = NULL; + u32 link_bw_in_kbps; + u32 
edp_min_bpp_x16, edp_max_bpp_x16; + struct dc *dc = sink->ctx->dc; + struct dc_dsc_bw_range bw_range = {0}; + struct dc_dsc_config dsc_cfg = {0}; + struct dc_dsc_config_options dsc_options = {0}; + + dc_dsc_get_default_config_option(dc, &dsc_options); + dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; + + verified_link_cap = dc_link_get_link_cap(stream->link); + link_bw_in_kbps = dc_link_bandwidth_kbps(stream->link, verified_link_cap); + edp_min_bpp_x16 = 8 * 16; + edp_max_bpp_x16 = 8 * 16; + + if (edp_max_bpp_x16 > dsc_caps->edp_max_bits_per_pixel) + edp_max_bpp_x16 = dsc_caps->edp_max_bits_per_pixel; + + if (edp_max_bpp_x16 < edp_min_bpp_x16) + edp_min_bpp_x16 = edp_max_bpp_x16; + + if (dc_dsc_compute_bandwidth_range(dc->res_pool->dscs[0], + dc->debug.dsc_min_slice_height_override, + edp_min_bpp_x16, edp_max_bpp_x16, + dsc_caps, + &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), + &bw_range)) { + + if (bw_range.max_kbps < link_bw_in_kbps) { + if (dc_dsc_compute_config(dc->res_pool->dscs[0], + dsc_caps, + &dsc_options, + 0, + &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), + &dsc_cfg)) { + stream->timing.dsc_cfg = dsc_cfg; + stream->timing.flags.DSC = 1; + stream->timing.dsc_cfg.bits_per_pixel = edp_max_bpp_x16; + } + return; + } + } + + if (dc_dsc_compute_config(dc->res_pool->dscs[0], + dsc_caps, + &dsc_options, + link_bw_in_kbps, + &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), + &dsc_cfg)) { + stream->timing.dsc_cfg = dsc_cfg; + stream->timing.flags.DSC = 1; + } +} + + +static void apply_dsc_policy_for_stream(struct amdgpu_dm_connector *aconnector, + struct dc_sink *sink, struct dc_stream_state *stream, + struct dsc_dec_dpcd_caps *dsc_caps) +{ + struct drm_connector *drm_connector = &aconnector->base; + u32 link_bandwidth_kbps; + struct dc *dc = sink->ctx->dc; + u32 max_supported_bw_in_kbps, timing_bw_in_kbps; + u32 dsc_max_supported_bw_in_kbps; + u32 max_dsc_target_bpp_limit_override = + drm_connector->display_info.max_dsc_bpp; + struct dc_dsc_config_options dsc_options = {0}; + + dc_dsc_get_default_config_option(dc, &dsc_options); + dsc_options.max_target_bpp_limit_override_x16 = max_dsc_target_bpp_limit_override * 16; + + link_bandwidth_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, + dc_link_get_link_cap(aconnector->dc_link)); + + /* Set DSC policy according to dsc_clock_en */ + dc_dsc_policy_set_enable_dsc_when_not_needed( + aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE); + + if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_EDP && + !aconnector->dc_link->panel_config.dsc.disable_dsc_edp && + dc->caps.edp_dsc_support && aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE) { + + apply_dsc_policy_for_edp(aconnector, sink, stream, dsc_caps, max_dsc_target_bpp_limit_override); + + } else if (aconnector->dc_link && sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT) { + if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { + if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], + dsc_caps, + &dsc_options, + link_bandwidth_kbps, + &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), + &stream->timing.dsc_cfg)) { + stream->timing.flags.DSC = 1; + DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from SST RX\n", __func__, drm_connector->name); + } + } else if (sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { + timing_bw_in_kbps = 
dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link)); + max_supported_bw_in_kbps = link_bandwidth_kbps; + dsc_max_supported_bw_in_kbps = link_bandwidth_kbps; + + if (timing_bw_in_kbps > max_supported_bw_in_kbps && + max_supported_bw_in_kbps > 0 && + dsc_max_supported_bw_in_kbps > 0) + if (dc_dsc_compute_config(aconnector->dc_link->ctx->dc->res_pool->dscs[0], + dsc_caps, + &dsc_options, + dsc_max_supported_bw_in_kbps, + &stream->timing, + dc_link_get_highest_encoding_format(aconnector->dc_link), + &stream->timing.dsc_cfg)) { + stream->timing.flags.DSC = 1; + DRM_DEBUG_DRIVER("%s: [%s] DSC is selected from DP-HDMI PCON\n", + __func__, drm_connector->name); + } + } + } + + /* Overwrite the stream flag if DSC is enabled through debugfs */ + if (aconnector->dsc_settings.dsc_force_enable == DSC_CLK_FORCE_ENABLE) + stream->timing.flags.DSC = 1; + + if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_h) + stream->timing.dsc_cfg.num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; + + if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_num_slices_v) + stream->timing.dsc_cfg.num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; + + if (stream->timing.flags.DSC && aconnector->dsc_settings.dsc_bits_per_pixel) + stream->timing.dsc_cfg.bits_per_pixel = aconnector->dsc_settings.dsc_bits_per_pixel; +} + +static struct dc_stream_state * +create_stream_for_sink(struct amdgpu_dm_connector *aconnector, + const struct drm_display_mode *drm_mode, + const struct dm_connector_state *dm_state, + const struct dc_stream_state *old_stream, + int requested_bpc) +{ + struct drm_display_mode *preferred_mode = NULL; + struct drm_connector *drm_connector; + const struct drm_connector_state *con_state = &dm_state->base; + struct dc_stream_state *stream = NULL; + struct drm_display_mode mode; + struct drm_display_mode saved_mode; + struct drm_display_mode *freesync_mode = NULL; + bool native_mode_found = false; + bool recalculate_timing = false; + bool scale = dm_state->scaling != RMX_OFF; + int mode_refresh; + int preferred_refresh = 0; + enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN; + struct dsc_dec_dpcd_caps dsc_caps; + + struct dc_sink *sink = NULL; + + drm_mode_init(&mode, drm_mode); + memset(&saved_mode, 0, sizeof(saved_mode)); + + if (aconnector == NULL) { + DRM_ERROR("aconnector is NULL!\n"); + return stream; + } + + drm_connector = &aconnector->base; + + if (!aconnector->dc_sink) { + sink = create_fake_sink(aconnector); + if (!sink) + return stream; + } else { + sink = aconnector->dc_sink; + dc_sink_retain(sink); + } + + stream = dc_create_stream_for_sink(sink); + + if (stream == NULL) { + DRM_ERROR("Failed to create stream for sink!\n"); + goto finish; + } + + stream->dm_stream_context = aconnector; + + stream->timing.flags.LTE_340MCSC_SCRAMBLE = + drm_connector->display_info.hdmi.scdc.scrambling.low_rates; + + list_for_each_entry(preferred_mode, &aconnector->base.modes, head) { + /* Search for preferred mode */ + if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) { + native_mode_found = true; + break; + } + } + if (!native_mode_found) + preferred_mode = list_first_entry_or_null( + &aconnector->base.modes, + struct drm_display_mode, + head); + + mode_refresh = drm_mode_vrefresh(&mode); + + if (preferred_mode == NULL) { + /* + * This may not be an error, the use case is when we have no + * usermode calls to reset and set mode upon hotplug. 
In this
+ * case, we call set mode ourselves to restore the previous mode
+ * and the mode list may not be filled in time.
+ */
+ DRM_DEBUG_DRIVER("No preferred mode found\n");
+ } else {
+ recalculate_timing = is_freesync_video_mode(&mode, aconnector);
+ if (recalculate_timing) {
+ freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
+ drm_mode_copy(&saved_mode, &mode);
+ drm_mode_copy(&mode, freesync_mode);
+ } else {
+ decide_crtc_timing_for_drm_display_mode(
+ &mode, preferred_mode, scale);
+
+ preferred_refresh = drm_mode_vrefresh(preferred_mode);
+ }
+ }
+
+ if (recalculate_timing)
+ drm_mode_set_crtcinfo(&saved_mode, 0);
+
+ /*
+ * If scaling is enabled and refresh rate didn't change
+ * we copy the vic and polarities of the old timings
+ */
+ if (!scale || mode_refresh != preferred_refresh)
+ fill_stream_properties_from_drm_display_mode(
+ stream, &mode, &aconnector->base, con_state, NULL,
+ requested_bpc);
+ else
+ fill_stream_properties_from_drm_display_mode(
+ stream, &mode, &aconnector->base, con_state, old_stream,
+ requested_bpc);
+
+ if (aconnector->timing_changed) {
+ DC_LOG_DEBUG("%s: overriding timing for automated test, bpc %d, changing to %d\n",
+ __func__,
+ stream->timing.display_color_depth,
+ aconnector->timing_requested->display_color_depth);
+ stream->timing = *aconnector->timing_requested;
+ }
+
+ /* SST DSC determination policy */
+ update_dsc_caps(aconnector, sink, stream, &dsc_caps);
+ if (aconnector->dsc_settings.dsc_force_enable != DSC_CLK_FORCE_DISABLE && dsc_caps.is_dsc_supported)
+ apply_dsc_policy_for_stream(aconnector, sink, stream, &dsc_caps);
+
+ update_stream_scaling_settings(&mode, dm_state, stream);
+
+ fill_audio_info(
+ &stream->audio_info,
+ drm_connector,
+ sink);
+
+ update_stream_signal(stream, sink);
+
+ if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
+ mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
+ else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+ stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+ stream->signal == SIGNAL_TYPE_EDP) {
+ /*
+ * Decide whether the stream supports VSC SDP colorimetry
+ * before building the VSC info packet.
+ */
+ stream->use_vsc_sdp_for_colorimetry = false;
+ if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ stream->use_vsc_sdp_for_colorimetry =
+ aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
+ } else {
+ if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
+ stream->use_vsc_sdp_for_colorimetry = true;
+ }
+ if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
+ tf = TRANSFER_FUNC_GAMMA_22;
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
+
+ if (stream->link->psr_settings.psr_feature_enabled)
+ aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
+ }
+finish:
+ dc_sink_release(sink);
+
+ return stream;
+}
+
+static enum drm_connector_status
+amdgpu_dm_connector_detect(struct drm_connector *connector, bool force)
+{
+ bool connected;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ /*
+ * Notes:
+ * 1. This interface is NOT called in context of HPD irq.
+ * 2. This interface *is called* in context of user-mode ioctl, which
+ * makes it a bad place for *any* MST-related activity.
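+ * 3. The checks below also honour a forced state: when base.force is set
+ *    (for instance through the connector's sysfs status file), the status
+ *    is derived from the force value rather than from the presence of a
+ *    dc_sink.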
+ */ + + if (aconnector->base.force == DRM_FORCE_UNSPECIFIED && + !aconnector->fake_enable) + connected = (aconnector->dc_sink != NULL); + else + connected = (aconnector->base.force == DRM_FORCE_ON || + aconnector->base.force == DRM_FORCE_ON_DIGITAL); + + update_subconnector_property(aconnector); + + return (connected ? connector_status_connected : + connector_status_disconnected); +} + +int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector, + struct drm_connector_state *connector_state, + struct drm_property *property, + uint64_t val) +{ + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct dm_connector_state *dm_old_state = + to_dm_connector_state(connector->state); + struct dm_connector_state *dm_new_state = + to_dm_connector_state(connector_state); + + int ret = -EINVAL; + + if (property == dev->mode_config.scaling_mode_property) { + enum amdgpu_rmx_type rmx_type; + + switch (val) { + case DRM_MODE_SCALE_CENTER: + rmx_type = RMX_CENTER; + break; + case DRM_MODE_SCALE_ASPECT: + rmx_type = RMX_ASPECT; + break; + case DRM_MODE_SCALE_FULLSCREEN: + rmx_type = RMX_FULL; + break; + case DRM_MODE_SCALE_NONE: + default: + rmx_type = RMX_OFF; + break; + } + + if (dm_old_state->scaling == rmx_type) + return 0; + + dm_new_state->scaling = rmx_type; + ret = 0; + } else if (property == adev->mode_info.underscan_hborder_property) { + dm_new_state->underscan_hborder = val; + ret = 0; + } else if (property == adev->mode_info.underscan_vborder_property) { + dm_new_state->underscan_vborder = val; + ret = 0; + } else if (property == adev->mode_info.underscan_property) { + dm_new_state->underscan_enable = val; + ret = 0; + } else if (property == adev->mode_info.abm_level_property) { + dm_new_state->abm_level = val ?: ABM_LEVEL_IMMEDIATE_DISABLE; + ret = 0; + } + + return ret; +} + +int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector, + const struct drm_connector_state *state, + struct drm_property *property, + uint64_t *val) +{ + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct dm_connector_state *dm_state = + to_dm_connector_state(state); + int ret = -EINVAL; + + if (property == dev->mode_config.scaling_mode_property) { + switch (dm_state->scaling) { + case RMX_CENTER: + *val = DRM_MODE_SCALE_CENTER; + break; + case RMX_ASPECT: + *val = DRM_MODE_SCALE_ASPECT; + break; + case RMX_FULL: + *val = DRM_MODE_SCALE_FULLSCREEN; + break; + case RMX_OFF: + default: + *val = DRM_MODE_SCALE_NONE; + break; + } + ret = 0; + } else if (property == adev->mode_info.underscan_hborder_property) { + *val = dm_state->underscan_hborder; + ret = 0; + } else if (property == adev->mode_info.underscan_vborder_property) { + *val = dm_state->underscan_vborder; + ret = 0; + } else if (property == adev->mode_info.underscan_property) { + *val = dm_state->underscan_enable; + ret = 0; + } else if (property == adev->mode_info.abm_level_property) { + *val = (dm_state->abm_level != ABM_LEVEL_IMMEDIATE_DISABLE) ? 
+ dm_state->abm_level : 0; + ret = 0; + } + + return ret; +} + +static void amdgpu_dm_connector_unregister(struct drm_connector *connector) +{ + struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); + + drm_dp_aux_unregister(&amdgpu_dm_connector->dm_dp_aux.aux); +} + +static void amdgpu_dm_connector_destroy(struct drm_connector *connector) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_device *adev = drm_to_adev(connector->dev); + struct amdgpu_display_manager *dm = &adev->dm; + + /* + * Call only if mst_mgr was initialized before since it's not done + * for all connector types. + */ + if (aconnector->mst_mgr.dev) + drm_dp_mst_topology_mgr_destroy(&aconnector->mst_mgr); + + if (aconnector->bl_idx != -1) { + backlight_device_unregister(dm->backlight_dev[aconnector->bl_idx]); + dm->backlight_dev[aconnector->bl_idx] = NULL; + } + + if (aconnector->dc_em_sink) + dc_sink_release(aconnector->dc_em_sink); + aconnector->dc_em_sink = NULL; + if (aconnector->dc_sink) + dc_sink_release(aconnector->dc_sink); + aconnector->dc_sink = NULL; + + drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux); + drm_connector_unregister(connector); + drm_connector_cleanup(connector); + if (aconnector->i2c) { + i2c_del_adapter(&aconnector->i2c->base); + kfree(aconnector->i2c); + } + kfree(aconnector->dm_dp_aux.aux.name); + + kfree(connector); +} + +void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector) +{ + struct dm_connector_state *state = + to_dm_connector_state(connector->state); + + if (connector->state) + __drm_atomic_helper_connector_destroy_state(connector->state); + + kfree(state); + + state = kzalloc(sizeof(*state), GFP_KERNEL); + + if (state) { + state->scaling = RMX_OFF; + state->underscan_enable = false; + state->underscan_hborder = 0; + state->underscan_vborder = 0; + state->base.max_requested_bpc = 8; + state->vcpi_slots = 0; + state->pbn = 0; + + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) + state->abm_level = amdgpu_dm_abm_level ?: + ABM_LEVEL_IMMEDIATE_DISABLE; + + __drm_atomic_helper_connector_reset(connector, &state->base); + } +} + +struct drm_connector_state * +amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector) +{ + struct dm_connector_state *state = + to_dm_connector_state(connector->state); + + struct dm_connector_state *new_state = + kmemdup(state, sizeof(*state), GFP_KERNEL); + + if (!new_state) + return NULL; + + __drm_atomic_helper_connector_duplicate_state(connector, &new_state->base); + + new_state->freesync_capable = state->freesync_capable; + new_state->abm_level = state->abm_level; + new_state->scaling = state->scaling; + new_state->underscan_enable = state->underscan_enable; + new_state->underscan_hborder = state->underscan_hborder; + new_state->underscan_vborder = state->underscan_vborder; + new_state->vcpi_slots = state->vcpi_slots; + new_state->pbn = state->pbn; + return &new_state->base; +} + +static int +amdgpu_dm_connector_late_register(struct drm_connector *connector) +{ + struct amdgpu_dm_connector *amdgpu_dm_connector = + to_amdgpu_dm_connector(connector); + int r; + + amdgpu_dm_register_backlight_device(amdgpu_dm_connector); + + if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) || + (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) { + amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev; + r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux); + if (r) + return r; + } + +#if defined(CONFIG_DEBUG_FS) + 
connector_debugfs_init(amdgpu_dm_connector);
+#endif
+
+ return 0;
+}
+
+static void amdgpu_dm_connector_funcs_force(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct dc_link *dc_link = aconnector->dc_link;
+ struct dc_sink *dc_em_sink = aconnector->dc_em_sink;
+ struct edid *edid;
+
+ if (!connector->edid_override)
+ return;
+
+ drm_edid_override_connector_update(&aconnector->base);
+ edid = aconnector->base.edid_blob_ptr->data;
+ aconnector->edid = edid;
+
+ /* Update emulated (virtual) sink's EDID */
+ if (dc_em_sink && dc_link) {
+ memset(&dc_em_sink->edid_caps, 0, sizeof(struct dc_edid_caps));
+ memmove(dc_em_sink->dc_edid.raw_edid, edid, (edid->extensions + 1) * EDID_LENGTH);
+ dm_helpers_parse_edid_caps(
+ dc_link,
+ &dc_em_sink->dc_edid,
+ &dc_em_sink->edid_caps);
+ }
+}
+
+static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
+ .reset = amdgpu_dm_connector_funcs_reset,
+ .detect = amdgpu_dm_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = amdgpu_dm_connector_destroy,
+ .atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ .atomic_set_property = amdgpu_dm_connector_atomic_set_property,
+ .atomic_get_property = amdgpu_dm_connector_atomic_get_property,
+ .late_register = amdgpu_dm_connector_late_register,
+ .early_unregister = amdgpu_dm_connector_unregister,
+ .force = amdgpu_dm_connector_funcs_force
+};
+
+static int get_modes(struct drm_connector *connector)
+{
+ return amdgpu_dm_connector_get_modes(connector);
+}
+
+static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
+{
+ struct dc_sink_init_data init_params = {
+ .link = aconnector->dc_link,
+ .sink_signal = SIGNAL_TYPE_VIRTUAL
+ };
+ struct edid *edid;
+
+ if (!aconnector->base.edid_blob_ptr) {
+ /* If connector->edid_override is valid, propagate it
+ * to edid_blob_ptr
+ */
+
+ drm_edid_override_connector_update(&aconnector->base);
+
+ if (!aconnector->base.edid_blob_ptr) {
+ DRM_ERROR("No EDID firmware found on connector: %s, forcing to OFF!\n",
+ aconnector->base.name);
+
+ aconnector->base.force = DRM_FORCE_OFF;
+ return;
+ }
+ }
+
+ edid = (struct edid *) aconnector->base.edid_blob_ptr->data;
+
+ aconnector->edid = edid;
+
+ aconnector->dc_em_sink = dc_link_add_remote_sink(
+ aconnector->dc_link,
+ (uint8_t *)edid,
+ (edid->extensions + 1) * EDID_LENGTH,
+ &init_params);
+
+ if (aconnector->base.force == DRM_FORCE_ON) {
+ aconnector->dc_sink = aconnector->dc_link->local_sink ?
+ aconnector->dc_link->local_sink : + aconnector->dc_em_sink; + dc_sink_retain(aconnector->dc_sink); + } +} + +static void handle_edid_mgmt(struct amdgpu_dm_connector *aconnector) +{ + struct dc_link *link = (struct dc_link *)aconnector->dc_link; + + /* + * In case of headless boot with force on for DP managed connector + * Those settings have to be != 0 to get initial modeset + */ + if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT) { + link->verified_link_cap.lane_count = LANE_COUNT_FOUR; + link->verified_link_cap.link_rate = LINK_RATE_HIGH2; + } + + create_eml_sink(aconnector); +} + +static enum dc_status dm_validate_stream_and_context(struct dc *dc, + struct dc_stream_state *stream) +{ + enum dc_status dc_result = DC_ERROR_UNEXPECTED; + struct dc_plane_state *dc_plane_state = NULL; + struct dc_state *dc_state = NULL; + + if (!stream) + goto cleanup; + + dc_plane_state = dc_create_plane_state(dc); + if (!dc_plane_state) + goto cleanup; + + dc_state = dc_create_state(dc); + if (!dc_state) + goto cleanup; + + /* populate stream to plane */ + dc_plane_state->src_rect.height = stream->src.height; + dc_plane_state->src_rect.width = stream->src.width; + dc_plane_state->dst_rect.height = stream->src.height; + dc_plane_state->dst_rect.width = stream->src.width; + dc_plane_state->clip_rect.height = stream->src.height; + dc_plane_state->clip_rect.width = stream->src.width; + dc_plane_state->plane_size.surface_pitch = ((stream->src.width + 255) / 256) * 256; + dc_plane_state->plane_size.surface_size.height = stream->src.height; + dc_plane_state->plane_size.surface_size.width = stream->src.width; + dc_plane_state->plane_size.chroma_size.height = stream->src.height; + dc_plane_state->plane_size.chroma_size.width = stream->src.width; + dc_plane_state->format = SURFACE_PIXEL_FORMAT_GRPH_ARGB8888; + dc_plane_state->tiling_info.gfx9.swizzle = DC_SW_UNKNOWN; + dc_plane_state->rotation = ROTATION_ANGLE_0; + dc_plane_state->is_tiling_rotated = false; + dc_plane_state->tiling_info.gfx8.array_mode = DC_ARRAY_LINEAR_GENERAL; + + dc_result = dc_validate_stream(dc, stream); + if (dc_result == DC_OK) + dc_result = dc_validate_plane(dc, dc_plane_state); + + if (dc_result == DC_OK) + dc_result = dc_add_stream_to_ctx(dc, dc_state, stream); + + if (dc_result == DC_OK && !dc_add_plane_to_context( + dc, + stream, + dc_plane_state, + dc_state)) + dc_result = DC_FAIL_ATTACH_SURFACES; + + if (dc_result == DC_OK) + dc_result = dc_validate_global_state(dc, dc_state, true); + +cleanup: + if (dc_state) + dc_release_state(dc_state); + + if (dc_plane_state) + dc_plane_state_release(dc_plane_state); + + return dc_result; +} + +struct dc_stream_state * +create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, + const struct drm_display_mode *drm_mode, + const struct dm_connector_state *dm_state, + const struct dc_stream_state *old_stream) +{ + struct drm_connector *connector = &aconnector->base; + struct amdgpu_device *adev = drm_to_adev(connector->dev); + struct dc_stream_state *stream; + const struct drm_connector_state *drm_state = dm_state ? &dm_state->base : NULL; + int requested_bpc = drm_state ? 
drm_state->max_requested_bpc : 8;
+ enum dc_status dc_result = DC_OK;
+
+ do {
+ stream = create_stream_for_sink(aconnector, drm_mode,
+ dm_state, old_stream,
+ requested_bpc);
+ if (stream == NULL) {
+ DRM_ERROR("Failed to create stream for sink!\n");
+ break;
+ }
+
+ dc_result = dc_validate_stream(adev->dm.dc, stream);
+ if (dc_result == DC_OK && stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ dc_result = dm_dp_mst_is_port_support_mode(aconnector, stream);
+
+ if (dc_result == DC_OK)
+ dc_result = dm_validate_stream_and_context(adev->dm.dc, stream);
+
+ if (dc_result != DC_OK) {
+ DRM_DEBUG_KMS("Mode %dx%d (clk %d) failed DC validation with error %d (%s)\n",
+ drm_mode->hdisplay,
+ drm_mode->vdisplay,
+ drm_mode->clock,
+ dc_result,
+ dc_status_to_str(dc_result));
+
+ dc_stream_release(stream);
+ stream = NULL;
+ requested_bpc -= 2; /* lower bpc to retry validation */
+ }
+
+ } while (stream == NULL && requested_bpc >= 6);
+
+ if (dc_result == DC_FAIL_ENC_VALIDATE && !aconnector->force_yuv420_output) {
+ DRM_DEBUG_KMS("Retry forcing YCbCr420 encoding\n");
+
+ aconnector->force_yuv420_output = true;
+ stream = create_validate_stream_for_sink(aconnector, drm_mode,
+ dm_state, old_stream);
+ aconnector->force_yuv420_output = false;
+ }
+
+ return stream;
+}
+
+enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int result = MODE_ERROR;
+ struct dc_sink *dc_sink;
+ /* TODO: Unhardcode stream count */
+ struct dc_stream_state *stream;
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+
+ if ((mode->flags & DRM_MODE_FLAG_INTERLACE) ||
+ (mode->flags & DRM_MODE_FLAG_DBLSCAN))
+ return result;
+
+ /*
+ * Only run this the first time mode_valid is called to initialize
+ * EDID mgmt
+ */
+ if (aconnector->base.force != DRM_FORCE_UNSPECIFIED &&
+ !aconnector->dc_em_sink)
+ handle_edid_mgmt(aconnector);
+
+ dc_sink = to_amdgpu_dm_connector(connector)->dc_sink;
+
+ if (dc_sink == NULL && aconnector->base.force != DRM_FORCE_ON_DIGITAL &&
+ aconnector->base.force != DRM_FORCE_ON) {
+ DRM_ERROR("dc_sink is NULL!\n");
+ goto fail;
+ }
+
+ drm_mode_set_crtcinfo(mode, 0);
+
+ stream = create_validate_stream_for_sink(aconnector, mode,
+ to_dm_connector_state(connector->state),
+ NULL);
+ if (stream) {
+ dc_stream_release(stream);
+ result = MODE_OK;
+ }
+
+fail:
+ /* TODO: error handling */
+ return result;
+}
+
+static int fill_hdr_info_packet(const struct drm_connector_state *state,
+ struct dc_info_packet *out)
+{
+ struct hdmi_drm_infoframe frame;
+ unsigned char buf[30]; /* 26 + 4 */
+ ssize_t len;
+ int ret, i;
+
+ memset(out, 0, sizeof(*out));
+
+ if (!state->hdr_output_metadata)
+ return 0;
+
+ ret = drm_hdmi_infoframe_set_hdr_metadata(&frame, state);
+ if (ret)
+ return ret;
+
+ len = hdmi_drm_infoframe_pack_only(&frame, buf, sizeof(buf));
+ if (len < 0)
+ return (int)len;
+
+ /* Static metadata is a fixed 26 bytes + 4 byte header. */
+ if (len != 30)
+ return -EINVAL;
+
+ /* Prepare the infopacket for DC.
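+ * The header values below follow CTA-861-G for HDMI (infoframe type
+ * 0x87, version 1, 26-byte payload) and the DP HDR metadata SDP layout
+ * for DP/eDP (HB1 carries the 0x87 type, HB2 the payload length minus
+ * one, HB3 the SDP version).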
*/ + switch (state->connector->connector_type) { + case DRM_MODE_CONNECTOR_HDMIA: + out->hb0 = 0x87; /* type */ + out->hb1 = 0x01; /* version */ + out->hb2 = 0x1A; /* length */ + out->sb[0] = buf[3]; /* checksum */ + i = 1; + break; + + case DRM_MODE_CONNECTOR_DisplayPort: + case DRM_MODE_CONNECTOR_eDP: + out->hb0 = 0x00; /* sdp id, zero */ + out->hb1 = 0x87; /* type */ + out->hb2 = 0x1D; /* payload len - 1 */ + out->hb3 = (0x13 << 2); /* sdp version */ + out->sb[0] = 0x01; /* version */ + out->sb[1] = 0x1A; /* length */ + i = 2; + break; + + default: + return -EINVAL; + } + + memcpy(&out->sb[i], &buf[4], 26); + out->valid = true; + + print_hex_dump(KERN_DEBUG, "HDR SB:", DUMP_PREFIX_NONE, 16, 1, out->sb, + sizeof(out->sb), false); + + return 0; +} + +static int +amdgpu_dm_connector_atomic_check(struct drm_connector *conn, + struct drm_atomic_state *state) +{ + struct drm_connector_state *new_con_state = + drm_atomic_get_new_connector_state(state, conn); + struct drm_connector_state *old_con_state = + drm_atomic_get_old_connector_state(state, conn); + struct drm_crtc *crtc = new_con_state->crtc; + struct drm_crtc_state *new_crtc_state; + struct amdgpu_dm_connector *aconn = to_amdgpu_dm_connector(conn); + int ret; + + trace_amdgpu_dm_connector_atomic_check(new_con_state); + + if (conn->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { + ret = drm_dp_mst_root_conn_atomic_check(new_con_state, &aconn->mst_mgr); + if (ret < 0) + return ret; + } + + if (!crtc) + return 0; + + if (new_con_state->colorspace != old_con_state->colorspace) { + new_crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(new_crtc_state)) + return PTR_ERR(new_crtc_state); + + new_crtc_state->mode_changed = true; + } + + if (!drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state)) { + struct dc_info_packet hdr_infopacket; + + ret = fill_hdr_info_packet(new_con_state, &hdr_infopacket); + if (ret) + return ret; + + new_crtc_state = drm_atomic_get_crtc_state(state, crtc); + if (IS_ERR(new_crtc_state)) + return PTR_ERR(new_crtc_state); + + /* + * DC considers the stream backends changed if the + * static metadata changes. Forcing the modeset also + * gives a simple way for userspace to switch from + * 8bpc to 10bpc when setting the metadata to enter + * or exit HDR. + * + * Changing the static metadata after it's been + * set is permissible, however. So only force a + * modeset if we're entering or exiting HDR. + */ + new_crtc_state->mode_changed = new_crtc_state->mode_changed || + !old_con_state->hdr_output_metadata || + !new_con_state->hdr_output_metadata; + } + + return 0; +} + +static const struct drm_connector_helper_funcs +amdgpu_dm_connector_helper_funcs = { + /* + * If hotplugging a second bigger display in FB Con mode, bigger resolution + * modes will be filtered by drm_mode_validate_size(), and those modes + * are missing after user start lightdm. So we need to renew modes list. 
+ * in get_modes call back, not just return the modes count + */ + .get_modes = get_modes, + .mode_valid = amdgpu_dm_connector_mode_valid, + .atomic_check = amdgpu_dm_connector_atomic_check, +}; + +static void dm_encoder_helper_disable(struct drm_encoder *encoder) +{ + +} + +int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth) +{ + switch (display_color_depth) { + case COLOR_DEPTH_666: + return 6; + case COLOR_DEPTH_888: + return 8; + case COLOR_DEPTH_101010: + return 10; + case COLOR_DEPTH_121212: + return 12; + case COLOR_DEPTH_141414: + return 14; + case COLOR_DEPTH_161616: + return 16; + default: + break; + } + return 0; +} + +static int dm_encoder_helper_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct drm_atomic_state *state = crtc_state->state; + struct drm_connector *connector = conn_state->connector; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct dm_connector_state *dm_new_connector_state = to_dm_connector_state(conn_state); + const struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; + struct drm_dp_mst_topology_mgr *mst_mgr; + struct drm_dp_mst_port *mst_port; + struct drm_dp_mst_topology_state *mst_state; + enum dc_color_depth color_depth; + int clock, bpp = 0; + bool is_y420 = false; + + if (!aconnector->mst_output_port) + return 0; + + mst_port = aconnector->mst_output_port; + mst_mgr = &aconnector->mst_root->mst_mgr; + + if (!crtc_state->connectors_changed && !crtc_state->mode_changed) + return 0; + + mst_state = drm_atomic_get_mst_topology_state(state, mst_mgr); + if (IS_ERR(mst_state)) + return PTR_ERR(mst_state); + + mst_state->pbn_div = dm_mst_get_pbn_divider(aconnector->mst_root->dc_link); + + if (!state->duplicated) { + int max_bpc = conn_state->max_requested_bpc; + + is_y420 = drm_mode_is_420_also(&connector->display_info, adjusted_mode) && + aconnector->force_yuv420_output; + color_depth = convert_color_depth_from_display_info(connector, + is_y420, + max_bpc); + bpp = convert_dc_color_depth_into_bpc(color_depth) * 3; + clock = adjusted_mode->clock; + dm_new_connector_state->pbn = drm_dp_calc_pbn_mode(clock, bpp << 4); + } + + dm_new_connector_state->vcpi_slots = + drm_dp_atomic_find_time_slots(state, mst_mgr, mst_port, + dm_new_connector_state->pbn); + if (dm_new_connector_state->vcpi_slots < 0) { + DRM_DEBUG_ATOMIC("failed finding vcpi slots: %d\n", (int)dm_new_connector_state->vcpi_slots); + return dm_new_connector_state->vcpi_slots; + } + return 0; +} + +const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs = { + .disable = dm_encoder_helper_disable, + .atomic_check = dm_encoder_helper_atomic_check +}; + +static int dm_update_mst_vcpi_slots_for_dsc(struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars) +{ + struct dc_stream_state *stream = NULL; + struct drm_connector *connector; + struct drm_connector_state *new_con_state; + struct amdgpu_dm_connector *aconnector; + struct dm_connector_state *dm_conn_state; + int i, j, ret; + int vcpi, pbn_div, pbn, slot_num = 0; + + for_each_new_connector_in_state(state, connector, new_con_state, i) { + + aconnector = to_amdgpu_dm_connector(connector); + + if (!aconnector->mst_output_port) + continue; + + if (!new_con_state || !new_con_state->crtc) + continue; + + dm_conn_state = to_dm_connector_state(new_con_state); + + for (j = 0; j < dc_state->stream_count; j++) { + stream = dc_state->streams[j]; + if 
(!stream) + continue; + + if ((struct amdgpu_dm_connector *)stream->dm_stream_context == aconnector) + break; + + stream = NULL; + } + + if (!stream) + continue; + + pbn_div = dm_mst_get_pbn_divider(stream->link); + /* pbn is calculated by compute_mst_dsc_configs_for_state*/ + for (j = 0; j < dc_state->stream_count; j++) { + if (vars[j].aconnector == aconnector) { + pbn = vars[j].pbn; + break; + } + } + + if (j == dc_state->stream_count) + continue; + + slot_num = DIV_ROUND_UP(pbn, pbn_div); + + if (stream->timing.flags.DSC != 1) { + dm_conn_state->pbn = pbn; + dm_conn_state->vcpi_slots = slot_num; + + ret = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, + dm_conn_state->pbn, false); + if (ret < 0) + return ret; + + continue; + } + + vcpi = drm_dp_mst_atomic_enable_dsc(state, aconnector->mst_output_port, pbn, true); + if (vcpi < 0) + return vcpi; + + dm_conn_state->pbn = pbn; + dm_conn_state->vcpi_slots = vcpi; + } + return 0; +} + +static int to_drm_connector_type(enum signal_type st) +{ + switch (st) { + case SIGNAL_TYPE_HDMI_TYPE_A: + return DRM_MODE_CONNECTOR_HDMIA; + case SIGNAL_TYPE_EDP: + return DRM_MODE_CONNECTOR_eDP; + case SIGNAL_TYPE_LVDS: + return DRM_MODE_CONNECTOR_LVDS; + case SIGNAL_TYPE_RGB: + return DRM_MODE_CONNECTOR_VGA; + case SIGNAL_TYPE_DISPLAY_PORT: + case SIGNAL_TYPE_DISPLAY_PORT_MST: + return DRM_MODE_CONNECTOR_DisplayPort; + case SIGNAL_TYPE_DVI_DUAL_LINK: + case SIGNAL_TYPE_DVI_SINGLE_LINK: + return DRM_MODE_CONNECTOR_DVID; + case SIGNAL_TYPE_VIRTUAL: + return DRM_MODE_CONNECTOR_VIRTUAL; + + default: + return DRM_MODE_CONNECTOR_Unknown; + } +} + +static struct drm_encoder *amdgpu_dm_connector_to_encoder(struct drm_connector *connector) +{ + struct drm_encoder *encoder; + + /* There is only one encoder per connector */ + drm_connector_for_each_possible_encoder(connector, encoder) + return encoder; + + return NULL; +} + +static void amdgpu_dm_get_native_mode(struct drm_connector *connector) +{ + struct drm_encoder *encoder; + struct amdgpu_encoder *amdgpu_encoder; + + encoder = amdgpu_dm_connector_to_encoder(connector); + + if (encoder == NULL) + return; + + amdgpu_encoder = to_amdgpu_encoder(encoder); + + amdgpu_encoder->native_mode.clock = 0; + + if (!list_empty(&connector->probed_modes)) { + struct drm_display_mode *preferred_mode = NULL; + + list_for_each_entry(preferred_mode, + &connector->probed_modes, + head) { + if (preferred_mode->type & DRM_MODE_TYPE_PREFERRED) + amdgpu_encoder->native_mode = *preferred_mode; + + break; + } + + } +} + +static struct drm_display_mode * +amdgpu_dm_create_common_mode(struct drm_encoder *encoder, + char *name, + int hdisplay, int vdisplay) +{ + struct drm_device *dev = encoder->dev; + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct drm_display_mode *mode = NULL; + struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; + + mode = drm_mode_duplicate(dev, native_mode); + + if (mode == NULL) + return NULL; + + mode->hdisplay = hdisplay; + mode->vdisplay = vdisplay; + mode->type &= ~DRM_MODE_TYPE_PREFERRED; + strscpy(mode->name, name, DRM_DISPLAY_MODE_LEN); + + return mode; + +} + +static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, + struct drm_connector *connector) +{ + struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder); + struct drm_display_mode *mode = NULL; + struct drm_display_mode *native_mode = &amdgpu_encoder->native_mode; + struct amdgpu_dm_connector *amdgpu_dm_connector = + to_amdgpu_dm_connector(connector); + int i; + 
int n; + struct mode_size { + char name[DRM_DISPLAY_MODE_LEN]; + int w; + int h; + } common_modes[] = { + { "640x480", 640, 480}, + { "800x600", 800, 600}, + { "1024x768", 1024, 768}, + { "1280x720", 1280, 720}, + { "1280x800", 1280, 800}, + {"1280x1024", 1280, 1024}, + { "1440x900", 1440, 900}, + {"1680x1050", 1680, 1050}, + {"1600x1200", 1600, 1200}, + {"1920x1080", 1920, 1080}, + {"1920x1200", 1920, 1200} + }; + + n = ARRAY_SIZE(common_modes); + + for (i = 0; i < n; i++) { + struct drm_display_mode *curmode = NULL; + bool mode_existed = false; + + if (common_modes[i].w > native_mode->hdisplay || + common_modes[i].h > native_mode->vdisplay || + (common_modes[i].w == native_mode->hdisplay && + common_modes[i].h == native_mode->vdisplay)) + continue; + + list_for_each_entry(curmode, &connector->probed_modes, head) { + if (common_modes[i].w == curmode->hdisplay && + common_modes[i].h == curmode->vdisplay) { + mode_existed = true; + break; + } + } + + if (mode_existed) + continue; + + mode = amdgpu_dm_create_common_mode(encoder, + common_modes[i].name, common_modes[i].w, + common_modes[i].h); + if (!mode) + continue; + + drm_mode_probed_add(connector, mode); + amdgpu_dm_connector->num_modes++; + } +} + +static void amdgpu_set_panel_orientation(struct drm_connector *connector) +{ + struct drm_encoder *encoder; + struct amdgpu_encoder *amdgpu_encoder; + const struct drm_display_mode *native_mode; + + if (connector->connector_type != DRM_MODE_CONNECTOR_eDP && + connector->connector_type != DRM_MODE_CONNECTOR_LVDS) + return; + + mutex_lock(&connector->dev->mode_config.mutex); + amdgpu_dm_connector_get_modes(connector); + mutex_unlock(&connector->dev->mode_config.mutex); + + encoder = amdgpu_dm_connector_to_encoder(connector); + if (!encoder) + return; + + amdgpu_encoder = to_amdgpu_encoder(encoder); + + native_mode = &amdgpu_encoder->native_mode; + if (native_mode->hdisplay == 0 || native_mode->vdisplay == 0) + return; + + drm_connector_set_panel_orientation_with_quirk(connector, + DRM_MODE_PANEL_ORIENTATION_UNKNOWN, + native_mode->hdisplay, + native_mode->vdisplay); +} + +static void amdgpu_dm_connector_ddc_get_modes(struct drm_connector *connector, + struct edid *edid) +{ + struct amdgpu_dm_connector *amdgpu_dm_connector = + to_amdgpu_dm_connector(connector); + + if (edid) { + /* empty probed_modes */ + INIT_LIST_HEAD(&connector->probed_modes); + amdgpu_dm_connector->num_modes = + drm_add_edid_modes(connector, edid); + + /* sorting the probed modes before calling function + * amdgpu_dm_get_native_mode() since EDID can have + * more than one preferred mode. The modes that are + * later in the probed mode list could be of higher + * and preferred resolution. For example, 3840x2160 + * resolution in base EDID preferred timing and 4096x2160 + * preferred resolution in DID extension block later. + */ + drm_mode_sort(&connector->probed_modes); + amdgpu_dm_get_native_mode(connector); + + /* Freesync capabilities are reset by calling + * drm_add_edid_modes() and need to be + * restored here. 
+ */ + amdgpu_dm_update_freesync_caps(connector, edid); + } else { + amdgpu_dm_connector->num_modes = 0; + } +} + +static bool is_duplicate_mode(struct amdgpu_dm_connector *aconnector, + struct drm_display_mode *mode) +{ + struct drm_display_mode *m; + + list_for_each_entry(m, &aconnector->base.probed_modes, head) { + if (drm_mode_equal(m, mode)) + return true; + } + + return false; +} + +static uint add_fs_modes(struct amdgpu_dm_connector *aconnector) +{ + const struct drm_display_mode *m; + struct drm_display_mode *new_mode; + uint i; + u32 new_modes_count = 0; + + /* Standard FPS values + * + * 23.976 - TV/NTSC + * 24 - Cinema + * 25 - TV/PAL + * 29.97 - TV/NTSC + * 30 - TV/NTSC + * 48 - Cinema HFR + * 50 - TV/PAL + * 60 - Commonly used + * 48,72,96,120 - Multiples of 24 + */ + static const u32 common_rates[] = { + 23976, 24000, 25000, 29970, 30000, + 48000, 50000, 60000, 72000, 96000, 120000 + }; + + /* + * Find mode with highest refresh rate with the same resolution + * as the preferred mode. Some monitors report a preferred mode + * with lower resolution than the highest refresh rate supported. + */ + + m = get_highest_refresh_rate_mode(aconnector, true); + if (!m) + return 0; + + for (i = 0; i < ARRAY_SIZE(common_rates); i++) { + u64 target_vtotal, target_vtotal_diff; + u64 num, den; + + if (drm_mode_vrefresh(m) * 1000 < common_rates[i]) + continue; + + if (common_rates[i] < aconnector->min_vfreq * 1000 || + common_rates[i] > aconnector->max_vfreq * 1000) + continue; + + num = (unsigned long long)m->clock * 1000 * 1000; + den = common_rates[i] * (unsigned long long)m->htotal; + target_vtotal = div_u64(num, den); + target_vtotal_diff = target_vtotal - m->vtotal; + + /* Check for illegal modes */ + if (m->vsync_start + target_vtotal_diff < m->vdisplay || + m->vsync_end + target_vtotal_diff < m->vsync_start || + m->vtotal + target_vtotal_diff < m->vsync_end) + continue; + + new_mode = drm_mode_duplicate(aconnector->base.dev, m); + if (!new_mode) + goto out; + + new_mode->vtotal += (u16)target_vtotal_diff; + new_mode->vsync_start += (u16)target_vtotal_diff; + new_mode->vsync_end += (u16)target_vtotal_diff; + new_mode->type &= ~DRM_MODE_TYPE_PREFERRED; + new_mode->type |= DRM_MODE_TYPE_DRIVER; + + if (!is_duplicate_mode(aconnector, new_mode)) { + drm_mode_probed_add(&aconnector->base, new_mode); + new_modes_count += 1; + } else + drm_mode_destroy(aconnector->base.dev, new_mode); + } + out: + return new_modes_count; +} + +static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connector, + struct edid *edid) +{ + struct amdgpu_dm_connector *amdgpu_dm_connector = + to_amdgpu_dm_connector(connector); + + if (!edid) + return; + + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) + amdgpu_dm_connector->num_modes += + add_fs_modes(amdgpu_dm_connector); +} + +static int amdgpu_dm_connector_get_modes(struct drm_connector *connector) +{ + struct amdgpu_dm_connector *amdgpu_dm_connector = + to_amdgpu_dm_connector(connector); + struct drm_encoder *encoder; + struct edid *edid = amdgpu_dm_connector->edid; + struct dc_link_settings *verified_link_cap = + &amdgpu_dm_connector->dc_link->verified_link_cap; + const struct dc *dc = amdgpu_dm_connector->dc_link->dc; + + encoder = amdgpu_dm_connector_to_encoder(connector); + + if (!drm_edid_is_valid(edid)) { + amdgpu_dm_connector->num_modes = + drm_add_modes_noedid(connector, 640, 480); + if (dc->link_srv->dp_get_encoding_format(verified_link_cap) == DP_128b_132b_ENCODING) + amdgpu_dm_connector->num_modes += + 
drm_add_modes_noedid(connector, 1920, 1080);
+ } else {
+ amdgpu_dm_connector_ddc_get_modes(connector, edid);
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
+ amdgpu_dm_connector_add_freesync_modes(connector, edid);
+ }
+ amdgpu_dm_fbc_init(connector);
+
+ return amdgpu_dm_connector->num_modes;
+}
+
+static const u32 supported_colorspaces =
+ BIT(DRM_MODE_COLORIMETRY_BT709_YCC) |
+ BIT(DRM_MODE_COLORIMETRY_OPRGB) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_RGB) |
+ BIT(DRM_MODE_COLORIMETRY_BT2020_YCC);
+
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ int connector_type,
+ struct dc_link *link,
+ int link_index)
+{
+ struct amdgpu_device *adev = drm_to_adev(dm->ddev);
+
+ /*
+ * Some of the properties below require access to state, like bpc.
+ * Allocate some default initial connector state with our reset helper.
+ */
+ if (aconnector->base.funcs->reset)
+ aconnector->base.funcs->reset(&aconnector->base);
+
+ aconnector->connector_id = link_index;
+ aconnector->bl_idx = -1;
+ aconnector->dc_link = link;
+ aconnector->base.interlace_allowed = false;
+ aconnector->base.doublescan_allowed = false;
+ aconnector->base.stereo_allowed = false;
+ aconnector->base.dpms = DRM_MODE_DPMS_OFF;
+ aconnector->hpd.hpd = AMDGPU_HPD_NONE; /* not used */
+ aconnector->audio_inst = -1;
+ aconnector->pack_sdp_v1_3 = false;
+ aconnector->as_type = ADAPTIVE_SYNC_TYPE_NONE;
+ memset(&aconnector->vsdb_info, 0, sizeof(aconnector->vsdb_info));
+ mutex_init(&aconnector->hpd_lock);
+ mutex_init(&aconnector->handle_mst_msg_ready);
+
+ /*
+ * Configure HPD hot plug support; connector->polled defaults to 0,
+ * which means HPD hot plug is not supported.
+ */
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.hdmi_ycbcr420_supported ? true : false;
+ break;
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ aconnector->base.polled = DRM_CONNECTOR_POLL_HPD;
+ link->link_enc = link_enc_cfg_get_link_enc(link);
+ ASSERT(link->link_enc);
+ if (link->link_enc)
+ aconnector->base.ycbcr_420_allowed =
+ link->link_enc->features.dp_ycbcr420_supported ?
true : false; + break; + case DRM_MODE_CONNECTOR_DVID: + aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; + break; + default: + break; + } + + drm_object_attach_property(&aconnector->base.base, + dm->ddev->mode_config.scaling_mode_property, + DRM_MODE_SCALE_NONE); + + drm_object_attach_property(&aconnector->base.base, + adev->mode_info.underscan_property, + UNDERSCAN_OFF); + drm_object_attach_property(&aconnector->base.base, + adev->mode_info.underscan_hborder_property, + 0); + drm_object_attach_property(&aconnector->base.base, + adev->mode_info.underscan_vborder_property, + 0); + + if (!aconnector->mst_root) + drm_connector_attach_max_bpc_property(&aconnector->base, 8, 16); + + aconnector->base.state->max_bpc = 16; + aconnector->base.state->max_requested_bpc = aconnector->base.state->max_bpc; + + if (connector_type == DRM_MODE_CONNECTOR_eDP && + (dc_is_dmcu_initialized(adev->dm.dc) || adev->dm.dc->ctx->dmub_srv)) { + drm_object_attach_property(&aconnector->base.base, + adev->mode_info.abm_level_property, 0); + } + + if (connector_type == DRM_MODE_CONNECTOR_HDMIA) { + if (!drm_mode_create_hdmi_colorspace_property(&aconnector->base, supported_colorspaces)) + drm_connector_attach_colorspace_property(&aconnector->base); + } else if ((connector_type == DRM_MODE_CONNECTOR_DisplayPort && !aconnector->mst_root) || + connector_type == DRM_MODE_CONNECTOR_eDP) { + if (!drm_mode_create_dp_colorspace_property(&aconnector->base, supported_colorspaces)) + drm_connector_attach_colorspace_property(&aconnector->base); + } + + if (connector_type == DRM_MODE_CONNECTOR_HDMIA || + connector_type == DRM_MODE_CONNECTOR_DisplayPort || + connector_type == DRM_MODE_CONNECTOR_eDP) { + drm_connector_attach_hdr_output_metadata_property(&aconnector->base); + + if (!aconnector->mst_root) + drm_connector_attach_vrr_capable_property(&aconnector->base); + + if (adev->dm.hdcp_workqueue) + drm_connector_attach_content_protection_property(&aconnector->base, true); + } +} + +static int amdgpu_dm_i2c_xfer(struct i2c_adapter *i2c_adap, + struct i2c_msg *msgs, int num) +{ + struct amdgpu_i2c_adapter *i2c = i2c_get_adapdata(i2c_adap); + struct ddc_service *ddc_service = i2c->ddc_service; + struct i2c_command cmd; + int i; + int result = -EIO; + + if (!ddc_service->ddc_pin || !ddc_service->ddc_pin->hw_info.hw_supported) + return result; + + cmd.payloads = kcalloc(num, sizeof(struct i2c_payload), GFP_KERNEL); + + if (!cmd.payloads) + return result; + + cmd.number_of_payloads = num; + cmd.engine = I2C_COMMAND_ENGINE_DEFAULT; + cmd.speed = 100; + + for (i = 0; i < num; i++) { + cmd.payloads[i].write = !(msgs[i].flags & I2C_M_RD); + cmd.payloads[i].address = msgs[i].addr; + cmd.payloads[i].length = msgs[i].len; + cmd.payloads[i].data = msgs[i].buf; + } + + if (dc_submit_i2c( + ddc_service->ctx->dc, + ddc_service->link->link_index, + &cmd)) + result = num; + + kfree(cmd.payloads); + return result; +} + +static u32 amdgpu_dm_i2c_func(struct i2c_adapter *adap) +{ + return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; +} + +static const struct i2c_algorithm amdgpu_dm_i2c_algo = { + .master_xfer = amdgpu_dm_i2c_xfer, + .functionality = amdgpu_dm_i2c_func, +}; + +static struct amdgpu_i2c_adapter * +create_i2c(struct ddc_service *ddc_service, + int link_index, + int *res) +{ + struct amdgpu_device *adev = ddc_service->ctx->driver_context; + struct amdgpu_i2c_adapter *i2c; + + i2c = kzalloc(sizeof(struct amdgpu_i2c_adapter), GFP_KERNEL); + if (!i2c) + return NULL; + i2c->base.owner = THIS_MODULE; + i2c->base.class = I2C_CLASS_DDC; + 
i2c->base.dev.parent = &adev->pdev->dev; + i2c->base.algo = &amdgpu_dm_i2c_algo; + snprintf(i2c->base.name, sizeof(i2c->base.name), "AMDGPU DM i2c hw bus %d", link_index); + i2c_set_adapdata(&i2c->base, i2c); + i2c->ddc_service = ddc_service; + + return i2c; +} + + +/* + * Note: this function assumes that dc_link_detect() was called for the + * dc_link which will be represented by this aconnector. + */ +static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm, + struct amdgpu_dm_connector *aconnector, + u32 link_index, + struct amdgpu_encoder *aencoder) +{ + int res = 0; + int connector_type; + struct dc *dc = dm->dc; + struct dc_link *link = dc_get_link_at_index(dc, link_index); + struct amdgpu_i2c_adapter *i2c; + + link->priv = aconnector; + + + i2c = create_i2c(link->ddc, link->link_index, &res); + if (!i2c) { + DRM_ERROR("Failed to create i2c adapter data\n"); + return -ENOMEM; + } + + aconnector->i2c = i2c; + res = i2c_add_adapter(&i2c->base); + + if (res) { + DRM_ERROR("Failed to register hw i2c %d\n", link->link_index); + goto out_free; + } + + connector_type = to_drm_connector_type(link->connector_signal); + + res = drm_connector_init_with_ddc( + dm->ddev, + &aconnector->base, + &amdgpu_dm_connector_funcs, + connector_type, + &i2c->base); + + if (res) { + DRM_ERROR("connector_init failed\n"); + aconnector->connector_id = -1; + goto out_free; + } + + drm_connector_helper_add( + &aconnector->base, + &amdgpu_dm_connector_helper_funcs); + + amdgpu_dm_connector_init_helper( + dm, + aconnector, + connector_type, + link, + link_index); + + drm_connector_attach_encoder( + &aconnector->base, &aencoder->base); + + if (connector_type == DRM_MODE_CONNECTOR_DisplayPort + || connector_type == DRM_MODE_CONNECTOR_eDP) + amdgpu_dm_initialize_dp_connector(dm, aconnector, link->link_index); + +out_free: + if (res) { + kfree(i2c); + aconnector->i2c = NULL; + } + return res; +} + +int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev) +{ + switch (adev->mode_info.num_crtc) { + case 1: + return 0x1; + case 2: + return 0x3; + case 3: + return 0x7; + case 4: + return 0xf; + case 5: + return 0x1f; + case 6: + default: + return 0x3f; + } +} + +static int amdgpu_dm_encoder_init(struct drm_device *dev, + struct amdgpu_encoder *aencoder, + uint32_t link_index) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + + int res = drm_encoder_init(dev, + &aencoder->base, + &amdgpu_dm_encoder_funcs, + DRM_MODE_ENCODER_TMDS, + NULL); + + aencoder->base.possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev); + + if (!res) + aencoder->encoder_id = link_index; + else + aencoder->encoder_id = -1; + + drm_encoder_helper_add(&aencoder->base, &amdgpu_dm_encoder_helper_funcs); + + return res; +} + +static void manage_dm_interrupts(struct amdgpu_device *adev, + struct amdgpu_crtc *acrtc, + bool enable) +{ + /* + * We have no guarantee that the frontend index maps to the same + * backend index - some even map to more than one. + * + * TODO: Use a different interrupt or check DC itself for the mapping. 
+ */ + int irq_type = + amdgpu_display_crtc_idx_to_irq_type( + adev, + acrtc->crtc_id); + + if (enable) { + drm_crtc_vblank_on(&acrtc->base); + amdgpu_irq_get( + adev, + &adev->pageflip_irq, + irq_type); +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + amdgpu_irq_get( + adev, + &adev->vline0_irq, + irq_type); +#endif + } else { +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + amdgpu_irq_put( + adev, + &adev->vline0_irq, + irq_type); +#endif + amdgpu_irq_put( + adev, + &adev->pageflip_irq, + irq_type); + drm_crtc_vblank_off(&acrtc->base); + } +} + +static void dm_update_pflip_irq_state(struct amdgpu_device *adev, + struct amdgpu_crtc *acrtc) +{ + int irq_type = + amdgpu_display_crtc_idx_to_irq_type(adev, acrtc->crtc_id); + + /** + * This reads the current state for the IRQ and force reapplies + * the setting to hardware. + */ + amdgpu_irq_update(adev, &adev->pageflip_irq, irq_type); +} + +static bool +is_scaling_state_different(const struct dm_connector_state *dm_state, + const struct dm_connector_state *old_dm_state) +{ + if (dm_state->scaling != old_dm_state->scaling) + return true; + if (!dm_state->underscan_enable && old_dm_state->underscan_enable) { + if (old_dm_state->underscan_hborder != 0 && old_dm_state->underscan_vborder != 0) + return true; + } else if (dm_state->underscan_enable && !old_dm_state->underscan_enable) { + if (dm_state->underscan_hborder != 0 && dm_state->underscan_vborder != 0) + return true; + } else if (dm_state->underscan_hborder != old_dm_state->underscan_hborder || + dm_state->underscan_vborder != old_dm_state->underscan_vborder) + return true; + return false; +} + +static bool is_content_protection_different(struct drm_crtc_state *new_crtc_state, + struct drm_crtc_state *old_crtc_state, + struct drm_connector_state *new_conn_state, + struct drm_connector_state *old_conn_state, + const struct drm_connector *connector, + struct hdcp_workqueue *hdcp_w) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state); + + pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", + connector->index, connector->status, connector->dpms); + pr_debug("[HDCP_DM] state protection old: %x new: %x\n", + old_conn_state->content_protection, new_conn_state->content_protection); + + if (old_crtc_state) + pr_debug("[HDCP_DM] old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + old_crtc_state->enable, + old_crtc_state->active, + old_crtc_state->mode_changed, + old_crtc_state->active_changed, + old_crtc_state->connectors_changed); + + if (new_crtc_state) + pr_debug("[HDCP_DM] NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + new_crtc_state->enable, + new_crtc_state->active, + new_crtc_state->mode_changed, + new_crtc_state->active_changed, + new_crtc_state->connectors_changed); + + /* hdcp content type change */ + if (old_conn_state->hdcp_content_type != new_conn_state->hdcp_content_type && + new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_UNDESIRED) { + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + pr_debug("[HDCP_DM] Type0/1 change %s :true\n", __func__); + return true; + } + + /* CP is being re enabled, ignore this */ + if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED && + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { + if (new_crtc_state && new_crtc_state->mode_changed) { + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + 
pr_debug("[HDCP_DM] ENABLED->DESIRED & mode_changed %s :true\n", __func__); + return true; + } + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_ENABLED; + pr_debug("[HDCP_DM] ENABLED -> DESIRED %s :false\n", __func__); + return false; + } + + /* S3 resume case, since old state will always be 0 (UNDESIRED) and the restored state will be ENABLED + * + * Handles: UNDESIRED -> ENABLED + */ + if (old_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_UNDESIRED && + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) + new_conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + + /* Stream removed and re-enabled + * + * Can sometimes overlap with the HPD case, + * thus set update_hdcp to false to avoid + * setting HDCP multiple times. + * + * Handles: DESIRED -> DESIRED (Special case) + */ + if (!(old_conn_state->crtc && old_conn_state->crtc->enabled) && + new_conn_state->crtc && new_conn_state->crtc->enabled && + connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED) { + dm_con_state->update_hdcp = false; + pr_debug("[HDCP_DM] DESIRED->DESIRED (Stream removed and re-enabled) %s :true\n", + __func__); + return true; + } + + /* Hot-plug, headless s3, dpms + * + * Only start HDCP if the display is connected/enabled. + * update_hdcp flag will be set to false until the next + * HPD comes in. + * + * Handles: DESIRED -> DESIRED (Special case) + */ + if (dm_con_state->update_hdcp && + new_conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED && + connector->dpms == DRM_MODE_DPMS_ON && aconnector->dc_sink != NULL) { + dm_con_state->update_hdcp = false; + pr_debug("[HDCP_DM] DESIRED->DESIRED (Hot-plug, headless s3, dpms) %s :true\n", + __func__); + return true; + } + + if (old_conn_state->content_protection == new_conn_state->content_protection) { + if (new_conn_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED) { + if (new_crtc_state && new_crtc_state->mode_changed) { + pr_debug("[HDCP_DM] DESIRED->DESIRED or ENABLE->ENABLE mode_change %s :true\n", + __func__); + return true; + } + pr_debug("[HDCP_DM] DESIRED->DESIRED & ENABLE->ENABLE %s :false\n", + __func__); + return false; + } + + pr_debug("[HDCP_DM] UNDESIRED->UNDESIRED %s :false\n", __func__); + return false; + } + + if (new_conn_state->content_protection != DRM_MODE_CONTENT_PROTECTION_ENABLED) { + pr_debug("[HDCP_DM] UNDESIRED->DESIRED or DESIRED->UNDESIRED or ENABLED->UNDESIRED %s :true\n", + __func__); + return true; + } + + pr_debug("[HDCP_DM] DESIRED->ENABLED %s :false\n", __func__); + return false; +} + +static void remove_stream(struct amdgpu_device *adev, + struct amdgpu_crtc *acrtc, + struct dc_stream_state *stream) +{ + /* this is the update mode case */ + + acrtc->otg_inst = -1; + acrtc->enabled = false; +} + +static void prepare_flip_isr(struct amdgpu_crtc *acrtc) +{ + + assert_spin_locked(&acrtc->base.dev->event_lock); + WARN_ON(acrtc->event); + + acrtc->event = acrtc->base.state->event; + + /* Set the flip status */ + acrtc->pflip_status = AMDGPU_FLIP_SUBMITTED; + + /* Mark this event as consumed */ + acrtc->base.state->event = NULL; + + DC_LOG_PFLIP("crtc:%d, pflip_stat:AMDGPU_FLIP_SUBMITTED\n", + acrtc->crtc_id); +} + +static void update_freesync_state_on_stream( + struct amdgpu_display_manager *dm, + struct dm_crtc_state *new_crtc_state, + struct dc_stream_state *new_stream, + struct dc_plane_state *surface, + u32 flip_timestamp_in_us) +{ + struct mod_vrr_params vrr_params; + struct dc_info_packet 
vrr_infopacket = {0}; + struct amdgpu_device *adev = dm->adev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); + unsigned long flags; + bool pack_sdp_v1_3 = false; + struct amdgpu_dm_connector *aconn; + enum vrr_packet_type packet_type = PACKET_TYPE_VRR; + + if (!new_stream) + return; + + /* + * TODO: Determine why min/max totals and vrefresh can be 0 here. + * For now it's sufficient to just guard against these conditions. + */ + + if (!new_stream->timing.h_total || !new_stream->timing.v_total) + return; + + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); + vrr_params = acrtc->dm_irq_params.vrr_params; + + if (surface) { + mod_freesync_handle_preflip( + dm->freesync_module, + surface, + new_stream, + flip_timestamp_in_us, + &vrr_params); + + if (adev->family < AMDGPU_FAMILY_AI && + amdgpu_dm_crtc_vrr_active(new_crtc_state)) { + mod_freesync_handle_v_update(dm->freesync_module, + new_stream, &vrr_params); + + /* Need to call this before the frame ends. */ + dc_stream_adjust_vmin_vmax(dm->dc, + new_crtc_state->stream, + &vrr_params.adjust); + } + } + + aconn = (struct amdgpu_dm_connector *)new_stream->dm_stream_context; + + if (aconn && (aconn->as_type == FREESYNC_TYPE_PCON_IN_WHITELIST || aconn->vsdb_info.replay_mode)) { + pack_sdp_v1_3 = aconn->pack_sdp_v1_3; + + if (aconn->vsdb_info.amd_vsdb_version == 1) + packet_type = PACKET_TYPE_FS_V1; + else if (aconn->vsdb_info.amd_vsdb_version == 2) + packet_type = PACKET_TYPE_FS_V2; + else if (aconn->vsdb_info.amd_vsdb_version == 3) + packet_type = PACKET_TYPE_FS_V3; + + mod_build_adaptive_sync_infopacket(new_stream, aconn->as_type, NULL, + &new_stream->adaptive_sync_infopacket); + } + + mod_freesync_build_vrr_infopacket( + dm->freesync_module, + new_stream, + &vrr_params, + packet_type, + TRANSFER_FUNC_UNKNOWN, + &vrr_infopacket, + pack_sdp_v1_3); + + new_crtc_state->freesync_vrr_info_changed |= + (memcmp(&new_crtc_state->vrr_infopacket, + &vrr_infopacket, + sizeof(vrr_infopacket)) != 0); + + acrtc->dm_irq_params.vrr_params = vrr_params; + new_crtc_state->vrr_infopacket = vrr_infopacket; + + new_stream->vrr_infopacket = vrr_infopacket; + new_stream->allow_freesync = mod_freesync_get_freesync_enabled(&vrr_params); + + if (new_crtc_state->freesync_vrr_info_changed) + DRM_DEBUG_KMS("VRR packet update: crtc=%u enabled=%d state=%d", + new_crtc_state->base.crtc->base.id, + (int)new_crtc_state->base.vrr_enabled, + (int)vrr_params.state); + + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); +} + +static void update_stream_irq_parameters( + struct amdgpu_display_manager *dm, + struct dm_crtc_state *new_crtc_state) +{ + struct dc_stream_state *new_stream = new_crtc_state->stream; + struct mod_vrr_params vrr_params; + struct mod_freesync_config config = new_crtc_state->freesync_config; + struct amdgpu_device *adev = dm->adev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(new_crtc_state->base.crtc); + unsigned long flags; + + if (!new_stream) + return; + + /* + * TODO: Determine why min/max totals and vrefresh can be 0 here. + * For now it's sufficient to just guard against these conditions. 
+ */ + if (!new_stream->timing.h_total || !new_stream->timing.v_total) + return; + + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); + vrr_params = acrtc->dm_irq_params.vrr_params; + + if (new_crtc_state->vrr_supported && + config.min_refresh_in_uhz && + config.max_refresh_in_uhz) { + /* + * if freesync compatible mode was set, config.state will be set + * in atomic check + */ + if (config.state == VRR_STATE_ACTIVE_FIXED && config.fixed_refresh_in_uhz && + (!drm_atomic_crtc_needs_modeset(&new_crtc_state->base) || + new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED)) { + vrr_params.max_refresh_in_uhz = config.max_refresh_in_uhz; + vrr_params.min_refresh_in_uhz = config.min_refresh_in_uhz; + vrr_params.fixed_refresh_in_uhz = config.fixed_refresh_in_uhz; + vrr_params.state = VRR_STATE_ACTIVE_FIXED; + } else { + config.state = new_crtc_state->base.vrr_enabled ? + VRR_STATE_ACTIVE_VARIABLE : + VRR_STATE_INACTIVE; + } + } else { + config.state = VRR_STATE_UNSUPPORTED; + } + + mod_freesync_build_vrr_params(dm->freesync_module, + new_stream, + &config, &vrr_params); + + new_crtc_state->freesync_config = config; + /* Copy state for access from DM IRQ handler */ + acrtc->dm_irq_params.freesync_config = config; + acrtc->dm_irq_params.active_planes = new_crtc_state->active_planes; + acrtc->dm_irq_params.vrr_params = vrr_params; + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); +} + +static void amdgpu_dm_handle_vrr_transition(struct dm_crtc_state *old_state, + struct dm_crtc_state *new_state) +{ + bool old_vrr_active = amdgpu_dm_crtc_vrr_active(old_state); + bool new_vrr_active = amdgpu_dm_crtc_vrr_active(new_state); + + if (!old_vrr_active && new_vrr_active) { + /* Transition VRR inactive -> active: + * While VRR is active, we must not disable vblank irq, as a + * reenable after disable would compute bogus vblank/pflip + * timestamps if it likely happened inside display front-porch. + * + * We also need vupdate irq for the actual core vblank handling + * at end of vblank. + */ + WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, true) != 0); + WARN_ON(drm_crtc_vblank_get(new_state->base.crtc) != 0); + DRM_DEBUG_DRIVER("%s: crtc=%u VRR off->on: Get vblank ref\n", + __func__, new_state->base.crtc->base.id); + } else if (old_vrr_active && !new_vrr_active) { + /* Transition VRR active -> inactive: + * Allow vblank irq disable again for fixed refresh rate. + */ + WARN_ON(amdgpu_dm_crtc_set_vupdate_irq(new_state->base.crtc, false) != 0); + drm_crtc_vblank_put(new_state->base.crtc); + DRM_DEBUG_DRIVER("%s: crtc=%u VRR on->off: Drop vblank ref\n", + __func__, new_state->base.crtc->base.id); + } +} + +static void amdgpu_dm_commit_cursors(struct drm_atomic_state *state) +{ + struct drm_plane *plane; + struct drm_plane_state *old_plane_state; + int i; + + /* + * TODO: Make this per-stream so we don't issue redundant updates for + * commits with multiple streams. + */ + for_each_old_plane_in_state(state, plane, old_plane_state, i) + if (plane->type == DRM_PLANE_TYPE_CURSOR) + amdgpu_dm_plane_handle_cursor_update(plane, old_plane_state); +} + +static inline uint32_t get_mem_type(struct drm_framebuffer *fb) +{ + struct amdgpu_bo *abo = gem_to_amdgpu_bo(fb->obj[0]); + + return abo->tbo.resource ? 
abo->tbo.resource->mem_type : 0; +} + +static void amdgpu_dm_commit_planes(struct drm_atomic_state *state, + struct drm_device *dev, + struct amdgpu_display_manager *dm, + struct drm_crtc *pcrtc, + bool wait_for_vblank) +{ + u32 i; + u64 timestamp_ns = ktime_get_ns(); + struct drm_plane *plane; + struct drm_plane_state *old_plane_state, *new_plane_state; + struct amdgpu_crtc *acrtc_attach = to_amdgpu_crtc(pcrtc); + struct drm_crtc_state *new_pcrtc_state = + drm_atomic_get_new_crtc_state(state, pcrtc); + struct dm_crtc_state *acrtc_state = to_dm_crtc_state(new_pcrtc_state); + struct dm_crtc_state *dm_old_crtc_state = + to_dm_crtc_state(drm_atomic_get_old_crtc_state(state, pcrtc)); + int planes_count = 0, vpos, hpos; + unsigned long flags; + u32 target_vblank, last_flip_vblank; + bool vrr_active = amdgpu_dm_crtc_vrr_active(acrtc_state); + bool cursor_update = false; + bool pflip_present = false; + bool dirty_rects_changed = false; + struct { + struct dc_surface_update surface_updates[MAX_SURFACES]; + struct dc_plane_info plane_infos[MAX_SURFACES]; + struct dc_scaling_info scaling_infos[MAX_SURFACES]; + struct dc_flip_addrs flip_addrs[MAX_SURFACES]; + struct dc_stream_update stream_update; + } *bundle; + + bundle = kzalloc(sizeof(*bundle), GFP_KERNEL); + + if (!bundle) { + dm_error("Failed to allocate update bundle\n"); + goto cleanup; + } + + /* + * Disable the cursor first if we're disabling all the planes. + * It'll remain on the screen after the planes are re-enabled + * if we don't. + */ + if (acrtc_state->active_planes == 0) + amdgpu_dm_commit_cursors(state); + + /* update planes when needed */ + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + struct drm_crtc *crtc = new_plane_state->crtc; + struct drm_crtc_state *new_crtc_state; + struct drm_framebuffer *fb = new_plane_state->fb; + struct amdgpu_framebuffer *afb = (struct amdgpu_framebuffer *)fb; + bool plane_needs_flip; + struct dc_plane_state *dc_plane; + struct dm_plane_state *dm_new_plane_state = to_dm_plane_state(new_plane_state); + + /* Cursor plane is handled after stream updates */ + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + if ((fb && crtc == pcrtc) || + (old_plane_state->fb && old_plane_state->crtc == pcrtc)) + cursor_update = true; + + continue; + } + + if (!fb || !crtc || pcrtc != crtc) + continue; + + new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + if (!new_crtc_state->active) + continue; + + dc_plane = dm_new_plane_state->dc_state; + if (!dc_plane) + continue; + + bundle->surface_updates[planes_count].surface = dc_plane; + if (new_pcrtc_state->color_mgmt_changed) { + bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction; + bundle->surface_updates[planes_count].in_transfer_func = dc_plane->in_transfer_func; + bundle->surface_updates[planes_count].gamut_remap_matrix = &dc_plane->gamut_remap_matrix; + } + + amdgpu_dm_plane_fill_dc_scaling_info(dm->adev, new_plane_state, + &bundle->scaling_infos[planes_count]); + + bundle->surface_updates[planes_count].scaling_info = + &bundle->scaling_infos[planes_count]; + + plane_needs_flip = old_plane_state->fb && new_plane_state->fb; + + pflip_present = pflip_present || plane_needs_flip; + + if (!plane_needs_flip) { + planes_count += 1; + continue; + } + + fill_dc_plane_info_and_addr( + dm->adev, new_plane_state, + afb->tiling_flags, + &bundle->plane_infos[planes_count], + &bundle->flip_addrs[planes_count].address, + afb->tmz_surface, false); + + drm_dbg_state(state->dev, "plane: id=%d dcc_en=%d\n", + 
new_plane_state->plane->index,
+ bundle->plane_infos[planes_count].dcc.enable);
+
+ bundle->surface_updates[planes_count].plane_info =
+ &bundle->plane_infos[planes_count];
+
+ if (acrtc_state->stream->link->psr_settings.psr_feature_enabled ||
+ acrtc_state->stream->link->replay_settings.replay_feature_enabled) {
+ fill_dc_dirty_rects(plane, old_plane_state,
+ new_plane_state, new_crtc_state,
+ &bundle->flip_addrs[planes_count],
+ &dirty_rects_changed);
+
+ /*
+ * If the dirty regions changed, PSR-SU needs to be disabled
+ * temporarily and then re-enabled once the dirty regions are
+ * stable, to avoid a video glitch. PSR-SU will be re-enabled
+ * in vblank_control_worker() if the user pauses the video
+ * while PSR-SU is disabled.
+ */
+ if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 &&
+ acrtc_attach->dm_irq_params.allow_psr_entry &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) &&
+#endif
+ dirty_rects_changed) {
+ mutex_lock(&dm->dc_lock);
+ acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns =
+ timestamp_ns;
+ if (acrtc_state->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(acrtc_state->stream);
+ mutex_unlock(&dm->dc_lock);
+ }
+ }
+
+ /*
+ * Only allow immediate flips for fast updates that don't
+ * change memory domain, FB pitch, DCC state, rotation or
+ * mirroring.
+ *
+ * dm_crtc_helper_atomic_check() only accepts async flips with
+ * fast updates.
+ */
+ if (crtc->state->async_flip &&
+ (acrtc_state->update_type != UPDATE_TYPE_FAST ||
+ get_mem_type(old_plane_state->fb) != get_mem_type(fb)))
+ drm_warn_once(state->dev,
+ "[PLANE:%d:%s] async flip with non-fast update\n",
+ plane->base.id, plane->name);
+
+ bundle->flip_addrs[planes_count].flip_immediate =
+ crtc->state->async_flip &&
+ acrtc_state->update_type == UPDATE_TYPE_FAST &&
+ get_mem_type(old_plane_state->fb) == get_mem_type(fb);
+
+ timestamp_ns = ktime_get_ns();
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us = div_u64(timestamp_ns, 1000);
+ bundle->surface_updates[planes_count].flip_addr = &bundle->flip_addrs[planes_count];
+ bundle->surface_updates[planes_count].surface = dc_plane;
+
+ if (!bundle->surface_updates[planes_count].surface) {
+ DRM_ERROR("No surface for CRTC: id=%d\n",
+ acrtc_attach->crtc_id);
+ continue;
+ }
+
+ if (plane == pcrtc->primary)
+ update_freesync_state_on_stream(
+ dm,
+ acrtc_state,
+ acrtc_state->stream,
+ dc_plane,
+ bundle->flip_addrs[planes_count].flip_timestamp_in_us);
+
+ drm_dbg_state(state->dev, "%s Flipping to hi: 0x%x, low: 0x%x\n",
+ __func__,
+ bundle->flip_addrs[planes_count].address.grph.addr.high_part,
+ bundle->flip_addrs[planes_count].address.grph.addr.low_part);
+
+ planes_count += 1;
+
+ }
+
+ if (pflip_present) {
+ if (!vrr_active) {
+ /* Use old throttling in non-vrr fixed refresh rate mode
+ * to keep flip scheduling based on target vblank counts
+ * working in a backwards compatible way, e.g., for
+ * clients using the GLX_OML_sync_control extension or
+ * DRI3/Present extension with defined target_msc.
+ */
+ last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
+ } else {
+ /* For variable refresh rate mode only:
+ * Get vblank of last completed flip to avoid > 1 vrr
+ * flips per video frame by use of throttling, but allow
+ * flip programming anywhere in the possibly large
+ * variable vrr vblank interval for fine-grained flip
+ * timing control and more opportunity to avoid stutter
+ * on late submission of flips.
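+ *
+ * Illustrative example (an assumption, not taken from the code
+ * below): if last_flip_vblank was 1000 and wait_for_vblank is 1,
+ * target_vblank becomes 1001; the wait loop then holds flip
+ * programming until the scanout has left the vblank ahead of that
+ * target or the vblank counter reaches it, so at most one flip
+ * completes per (variable-length) refresh cycle.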
+ */ + spin_lock_irqsave(&pcrtc->dev->event_lock, flags); + last_flip_vblank = acrtc_attach->dm_irq_params.last_flip_vblank; + spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); + } + + target_vblank = last_flip_vblank + wait_for_vblank; + + /* + * Wait until we're out of the vertical blank period before the one + * targeted by the flip + */ + while ((acrtc_attach->enabled && + (amdgpu_display_get_crtc_scanoutpos(dm->ddev, acrtc_attach->crtc_id, + 0, &vpos, &hpos, NULL, + NULL, &pcrtc->hwmode) + & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) == + (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) && + (int)(target_vblank - + amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) { + usleep_range(1000, 1100); + } + + /** + * Prepare the flip event for the pageflip interrupt to handle. + * + * This only works in the case where we've already turned on the + * appropriate hardware blocks (eg. HUBP) so in the transition case + * from 0 -> n planes we have to skip a hardware generated event + * and rely on sending it from software. + */ + if (acrtc_attach->base.state->event && + acrtc_state->active_planes > 0) { + drm_crtc_vblank_get(pcrtc); + + spin_lock_irqsave(&pcrtc->dev->event_lock, flags); + + WARN_ON(acrtc_attach->pflip_status != AMDGPU_FLIP_NONE); + prepare_flip_isr(acrtc_attach); + + spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); + } + + if (acrtc_state->stream) { + if (acrtc_state->freesync_vrr_info_changed) + bundle->stream_update.vrr_infopacket = + &acrtc_state->stream->vrr_infopacket; + } + } else if (cursor_update && acrtc_state->active_planes > 0 && + acrtc_attach->base.state->event) { + drm_crtc_vblank_get(pcrtc); + + spin_lock_irqsave(&pcrtc->dev->event_lock, flags); + + acrtc_attach->event = acrtc_attach->base.state->event; + acrtc_attach->base.state->event = NULL; + + spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); + } + + /* Update the planes if changed or disable if we don't have any. */ + if ((planes_count || acrtc_state->active_planes == 0) && + acrtc_state->stream) { + /* + * If PSR or idle optimizations are enabled then flush out + * any pending work before hardware programming. + */ + if (dm->vblank_control_workqueue) + flush_workqueue(dm->vblank_control_workqueue); + + bundle->stream_update.stream = acrtc_state->stream; + if (new_pcrtc_state->mode_changed) { + bundle->stream_update.src = acrtc_state->stream->src; + bundle->stream_update.dst = acrtc_state->stream->dst; + } + + if (new_pcrtc_state->color_mgmt_changed) { + /* + * TODO: This isn't fully correct since we've actually + * already modified the stream in place. + */ + bundle->stream_update.gamut_remap = + &acrtc_state->stream->gamut_remap_matrix; + bundle->stream_update.output_csc_transform = + &acrtc_state->stream->csc_color_matrix; + bundle->stream_update.out_transfer_func = + acrtc_state->stream->out_transfer_func; + } + + acrtc_state->stream->abm_level = acrtc_state->abm_level; + if (acrtc_state->abm_level != dm_old_crtc_state->abm_level) + bundle->stream_update.abm_level = &acrtc_state->abm_level; + + mutex_lock(&dm->dc_lock); + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && + acrtc_state->stream->link->psr_settings.psr_allow_active) + amdgpu_dm_psr_disable(acrtc_state->stream); + mutex_unlock(&dm->dc_lock); + + /* + * If FreeSync state on the stream has changed then we need to + * re-adjust the min/max bounds now that DC doesn't handle this + * as part of commit. 
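+ *
+ * Illustrative example (an assumption, not taken from the code
+ * below): for a 48-144 Hz FreeSync panel, vrr_params.adjust would
+ * carry the v_total_min/v_total_max line counts matching those
+ * rates, and dc_stream_adjust_vmin_vmax() programs them so the
+ * hardware can stretch the vertical blank between the two bounds.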
+ */ + if (is_dc_timing_adjust_needed(dm_old_crtc_state, acrtc_state)) { + spin_lock_irqsave(&pcrtc->dev->event_lock, flags); + dc_stream_adjust_vmin_vmax( + dm->dc, acrtc_state->stream, + &acrtc_attach->dm_irq_params.vrr_params.adjust); + spin_unlock_irqrestore(&pcrtc->dev->event_lock, flags); + } + mutex_lock(&dm->dc_lock); + update_planes_and_stream_adapter(dm->dc, + acrtc_state->update_type, + planes_count, + acrtc_state->stream, + &bundle->stream_update, + bundle->surface_updates); + + /** + * Enable or disable the interrupts on the backend. + * + * Most pipes are put into power gating when unused. + * + * When power gating is enabled on a pipe we lose the + * interrupt enablement state when power gating is disabled. + * + * So we need to update the IRQ control state in hardware + * whenever the pipe turns on (since it could be previously + * power gated) or off (since some pipes can't be power gated + * on some ASICs). + */ + if (dm_old_crtc_state->active_planes != acrtc_state->active_planes) + dm_update_pflip_irq_state(drm_to_adev(dev), + acrtc_attach); + + if ((acrtc_state->update_type > UPDATE_TYPE_FAST) && + acrtc_state->stream->link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED && + !acrtc_state->stream->link->psr_settings.psr_feature_enabled) + amdgpu_dm_link_setup_psr(acrtc_state->stream); + + /* Decrement skip count when PSR is enabled and we're doing fast updates. */ + if (acrtc_state->update_type == UPDATE_TYPE_FAST && + acrtc_state->stream->link->psr_settings.psr_feature_enabled) { + struct amdgpu_dm_connector *aconn = + (struct amdgpu_dm_connector *)acrtc_state->stream->dm_stream_context; + + if (aconn->psr_skip_count > 0) + aconn->psr_skip_count--; + + /* Allow PSR when skip count is 0. */ + acrtc_attach->dm_irq_params.allow_psr_entry = !aconn->psr_skip_count; + + /* + * If sink supports PSR SU, there is no need to rely on + * a vblank event disable request to enable PSR. PSR SU + * can be enabled immediately once OS demonstrates an + * adequate number of fast atomic commits to notify KMD + * of update events. See `vblank_control_worker()`. + */ + if (acrtc_state->stream->link->psr_settings.psr_version >= DC_PSR_VERSION_SU_1 && + acrtc_attach->dm_irq_params.allow_psr_entry && +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + !amdgpu_dm_crc_window_is_activated(acrtc_state->base.crtc) && +#endif + !acrtc_state->stream->link->psr_settings.psr_allow_active && + (timestamp_ns - + acrtc_state->stream->link->psr_settings.psr_dirty_rects_change_timestamp_ns) > + 500000000) + amdgpu_dm_psr_enable(acrtc_state->stream); + } else { + acrtc_attach->dm_irq_params.allow_psr_entry = false; + } + + mutex_unlock(&dm->dc_lock); + } + + /* + * Update cursor state *after* programming all the planes. + * This avoids redundant programming in the case where we're going + * to be disabling a single plane - those pipes are being disabled. + */ + if (acrtc_state->active_planes) + amdgpu_dm_commit_cursors(state); + +cleanup: + kfree(bundle); +} + +static void amdgpu_dm_commit_audio(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_dm_connector *aconnector; + struct drm_connector *connector; + struct drm_connector_state *old_con_state, *new_con_state; + struct drm_crtc_state *new_crtc_state; + struct dm_crtc_state *new_dm_crtc_state; + const struct dc_stream_status *status; + int i, inst; + + /* Notify device removals. 
*/ + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + if (old_con_state->crtc != new_con_state->crtc) { + /* CRTC changes require notification. */ + goto notify; + } + + if (!new_con_state->crtc) + continue; + + new_crtc_state = drm_atomic_get_new_crtc_state( + state, new_con_state->crtc); + + if (!new_crtc_state) + continue; + + if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) + continue; + +notify: + aconnector = to_amdgpu_dm_connector(connector); + + mutex_lock(&adev->dm.audio_lock); + inst = aconnector->audio_inst; + aconnector->audio_inst = -1; + mutex_unlock(&adev->dm.audio_lock); + + amdgpu_dm_audio_eld_notify(adev, inst); + } + + /* Notify audio device additions. */ + for_each_new_connector_in_state(state, connector, new_con_state, i) { + if (!new_con_state->crtc) + continue; + + new_crtc_state = drm_atomic_get_new_crtc_state( + state, new_con_state->crtc); + + if (!new_crtc_state) + continue; + + if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) + continue; + + new_dm_crtc_state = to_dm_crtc_state(new_crtc_state); + if (!new_dm_crtc_state->stream) + continue; + + status = dc_stream_get_status(new_dm_crtc_state->stream); + if (!status) + continue; + + aconnector = to_amdgpu_dm_connector(connector); + + mutex_lock(&adev->dm.audio_lock); + inst = status->audio_inst; + aconnector->audio_inst = inst; + mutex_unlock(&adev->dm.audio_lock); + + amdgpu_dm_audio_eld_notify(adev, inst); + } +} + +/* + * amdgpu_dm_crtc_copy_transient_flags - copy mirrored flags from DRM to DC + * @crtc_state: the DRM CRTC state + * @stream_state: the DC stream state. + * + * Copy the mirrored transient state flags from DRM, to DC. It is used to bring + * a dc_stream_state's flags in sync with a drm_crtc_state's flags. + */ +static void amdgpu_dm_crtc_copy_transient_flags(struct drm_crtc_state *crtc_state, + struct dc_stream_state *stream_state) +{ + stream_state->mode_changed = drm_atomic_crtc_needs_modeset(crtc_state); +} + +static void amdgpu_dm_commit_streams(struct drm_atomic_state *state, + struct dc_state *dc_state) +{ + struct drm_device *dev = state->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_display_manager *dm = &adev->dm; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + bool mode_set_reset_required = false; + u32 i; + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, + new_crtc_state, i) { + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + + if (old_crtc_state->active && + (!new_crtc_state->active || + drm_atomic_crtc_needs_modeset(new_crtc_state))) { + manage_dm_interrupts(adev, acrtc, false); + dc_stream_release(dm_old_crtc_state->stream); + } + } + + drm_atomic_helper_calc_timestamping_constants(state); + + /* update changed items */ + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + + drm_dbg_state(state->dev, + "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", + acrtc->crtc_id, + new_crtc_state->enable, + new_crtc_state->active, + new_crtc_state->planes_changed, + new_crtc_state->mode_changed, + new_crtc_state->active_changed, + new_crtc_state->connectors_changed); + + /* 
Disable cursor if disabling crtc */
+ if (old_crtc_state->active && !new_crtc_state->active) {
+ struct dc_cursor_position position;
+
+ memset(&position, 0, sizeof(position));
+ mutex_lock(&dm->dc_lock);
+ dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
+ mutex_unlock(&dm->dc_lock);
+ }
+
+ /* Copy all transient state flags into dc state */
+ if (dm_new_crtc_state->stream) {
+ amdgpu_dm_crtc_copy_transient_flags(&dm_new_crtc_state->base,
+ dm_new_crtc_state->stream);
+ }
+
+ /* Handles the headless hotplug case, updating new_state and
+ * aconnector as needed
+ */
+
+ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, dm_new_crtc_state->stream, dm_old_crtc_state->stream)) {
+
+ DRM_DEBUG_ATOMIC("Atomic commit: SET crtc id %d: [%p]\n", acrtc->crtc_id, acrtc);
+
+ if (!dm_new_crtc_state->stream) {
+ /*
+ * This could happen because of issues with the
+ * delivery of userspace notifications. In that
+ * case userspace tries to set a mode on a display
+ * which is in fact disconnected; dc_sink is NULL
+ * on the aconnector and we expect a reset mode to
+ * come soon.
+ *
+ * This can also happen when an unplug occurs
+ * during the resume sequence.
+ *
+ * In this case, we want to pretend we still
+ * have a sink to keep the pipe running so that
+ * the HW state stays consistent with the SW state.
+ */
+ DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n",
+ __func__, acrtc->base.base.id);
+ continue;
+ }
+
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
+ pm_runtime_get_noresume(dev->dev);
+
+ acrtc->enabled = true;
+ acrtc->hw_mode = new_crtc_state->mode;
+ crtc->hwmode = new_crtc_state->mode;
+ mode_set_reset_required = true;
+ } else if (modereset_required(new_crtc_state)) {
+ DRM_DEBUG_ATOMIC("Atomic commit: RESET. crtc id %d:[%p]\n", acrtc->crtc_id, acrtc);
+ /* i.e. reset mode */
+ if (dm_old_crtc_state->stream)
+ remove_stream(adev, acrtc, dm_old_crtc_state->stream);
+
+ mode_set_reset_required = true;
+ }
+ } /* for_each_crtc_in_state() */
+
+ /* If there was a mode set or reset, disable eDP PSR */
+ if (mode_set_reset_required) {
+ if (dm->vblank_control_workqueue)
+ flush_workqueue(dm->vblank_control_workqueue);
+
+ amdgpu_dm_psr_disable_all(dm);
+ }
+
+ dm_enable_per_frame_crtc_master_sync(dc_state);
+ mutex_lock(&dm->dc_lock);
+ WARN_ON(!dc_commit_streams(dm->dc, dc_state->streams, dc_state->stream_count));
+
+ /* Allow idle optimization when vblank count is 0 for display off */
+ if (dm->active_vblank_irq_count == 0)
+ dc_allow_idle_optimizations(dm->dc, true);
+ mutex_unlock(&dm->dc_lock);
+
+ for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) {
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+
+ if (dm_new_crtc_state->stream != NULL) {
+ const struct dc_stream_status *status =
+ dc_stream_get_status(dm_new_crtc_state->stream);
+
+ if (!status)
+ status = dc_stream_get_status_from_state(dc_state,
+ dm_new_crtc_state->stream);
+ if (!status)
+ DC_ERR("got no status for stream %p on acrtc%p\n", dm_new_crtc_state->stream, acrtc);
+ else
+ acrtc->otg_inst = status->primary_otg_inst;
+ }
+ }
+}
+
+/**
+ * amdgpu_dm_atomic_commit_tail() - amdgpu DM's commit tail implementation.
+ * @state: The atomic state to commit
+ *
+ * This will tell DC to commit the constructed DC state from atomic_check,
+ * programming the hardware. Any failure here implies a hardware failure,
+ * since atomic check should have filtered anything non-kosher.
+ */ +static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state) +{ + struct drm_device *dev = state->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_display_manager *dm = &adev->dm; + struct dm_atomic_state *dm_state; + struct dc_state *dc_state = NULL; + u32 i, j; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + unsigned long flags; + bool wait_for_vblank = true; + struct drm_connector *connector; + struct drm_connector_state *old_con_state, *new_con_state; + struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + int crtc_disable_count = 0; + + trace_amdgpu_dm_atomic_commit_tail_begin(state); + + drm_atomic_helper_update_legacy_modeset_state(dev, state); + drm_dp_mst_atomic_wait_for_dependencies(state); + + dm_state = dm_atomic_get_new_state(state); + if (dm_state && dm_state->context) { + dc_state = dm_state->context; + amdgpu_dm_commit_streams(state, dc_state); + } + + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + if (!adev->dm.hdcp_workqueue) + continue; + + pr_debug("[HDCP_DM] -------------- i : %x ----------\n", i); + + if (!connector) + continue; + + pr_debug("[HDCP_DM] connector->index: %x connect_status: %x dpms: %x\n", + connector->index, connector->status, connector->dpms); + pr_debug("[HDCP_DM] state protection old: %x new: %x\n", + old_con_state->content_protection, new_con_state->content_protection); + + if (aconnector->dc_sink) { + if (aconnector->dc_sink->sink_signal != SIGNAL_TYPE_VIRTUAL && + aconnector->dc_sink->sink_signal != SIGNAL_TYPE_NONE) { + pr_debug("[HDCP_DM] pipe_ctx dispname=%s\n", + aconnector->dc_sink->edid_caps.display_name); + } + } + + new_crtc_state = NULL; + old_crtc_state = NULL; + + if (acrtc) { + new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); + } + + if (old_crtc_state) + pr_debug("old crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + old_crtc_state->enable, + old_crtc_state->active, + old_crtc_state->mode_changed, + old_crtc_state->active_changed, + old_crtc_state->connectors_changed); + + if (new_crtc_state) + pr_debug("NEW crtc en: %x a: %x m: %x a-chg: %x c-chg: %x\n", + new_crtc_state->enable, + new_crtc_state->active, + new_crtc_state->mode_changed, + new_crtc_state->active_changed, + new_crtc_state->connectors_changed); + } + + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + + if (!adev->dm.hdcp_workqueue) + continue; + + new_crtc_state = NULL; + old_crtc_state = NULL; + + if (acrtc) { + new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base); + old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base); + } + + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + + if (dm_new_crtc_state && dm_new_crtc_state->stream == NULL && + connector->state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { + hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index); + 
new_con_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ dm_new_con_state->update_hdcp = true;
+ continue;
+ }
+
+ if (is_content_protection_different(new_crtc_state, old_crtc_state, new_con_state,
+ old_con_state, connector, adev->dm.hdcp_workqueue)) {
+ /* When a display is unplugged from the MST hub, the connector
+ * will be destroyed within dm_dp_mst_connector_destroy and the
+ * connector's HDCP properties (type, undesired, desired,
+ * enabled) will be lost. So save the HDCP properties into
+ * hdcp_work within amdgpu_dm_atomic_commit_tail; if the same
+ * display is plugged back with the same display index, its HDCP
+ * properties will be retrieved from hdcp_work within
+ * dm_dp_mst_get_modes.
+ */
+
+ bool enable_encryption = false;
+
+ if (new_con_state->content_protection == DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ if (aconnector->dc_link && aconnector->dc_sink &&
+ aconnector->dc_link->type == dc_connection_mst_branch) {
+ struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+ struct hdcp_workqueue *hdcp_w =
+ &hdcp_work[aconnector->dc_link->link_index];
+
+ hdcp_w->hdcp_content_type[connector->index] =
+ new_con_state->hdcp_content_type;
+ hdcp_w->content_protection[connector->index] =
+ new_con_state->content_protection;
+ }
+
+ if (new_crtc_state && new_crtc_state->mode_changed &&
+ new_con_state->content_protection >= DRM_MODE_CONTENT_PROTECTION_DESIRED)
+ enable_encryption = true;
+
+ DRM_INFO("[HDCP_DM] hdcp_update_display enable_encryption = %x\n", enable_encryption);
+
+ hdcp_update_display(
+ adev->dm.hdcp_workqueue, aconnector->dc_link->link_index, aconnector,
+ new_con_state->hdcp_content_type, enable_encryption);
+ }
+ }
+
+ /* Handle connector state changes */
+ for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) {
+ struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state);
+ struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state);
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc);
+ struct dc_surface_update *dummy_updates;
+ struct dc_stream_update stream_update;
+ struct dc_info_packet hdr_packet;
+ struct dc_stream_status *status = NULL;
+ bool abm_changed, hdr_changed, scaling_changed;
+
+ memset(&stream_update, 0, sizeof(stream_update));
+
+ if (acrtc) {
+ new_crtc_state = drm_atomic_get_new_crtc_state(state, &acrtc->base);
+ old_crtc_state = drm_atomic_get_old_crtc_state(state, &acrtc->base);
+ }
+
+ /* Skip any modesets/resets */
+ if (!acrtc || drm_atomic_crtc_needs_modeset(new_crtc_state))
+ continue;
+
+ dm_new_crtc_state = to_dm_crtc_state(new_crtc_state);
+ dm_old_crtc_state = to_dm_crtc_state(old_crtc_state);
+
+ scaling_changed = is_scaling_state_different(dm_new_con_state,
+ dm_old_con_state);
+
+ abm_changed = dm_new_crtc_state->abm_level !=
+ dm_old_crtc_state->abm_level;
+
+ hdr_changed =
+ !drm_connector_atomic_hdr_metadata_equal(old_con_state, new_con_state);
+
+ if (!scaling_changed && !abm_changed && !hdr_changed)
+ continue;
+
+ stream_update.stream = dm_new_crtc_state->stream;
+ if (scaling_changed) {
+ update_stream_scaling_settings(&dm_new_con_state->base.crtc->mode,
+ dm_new_con_state, dm_new_crtc_state->stream);
+
+ stream_update.src = dm_new_crtc_state->stream->src;
+ stream_update.dst = dm_new_crtc_state->stream->dst;
+ }
+
+ if (abm_changed) {
+ dm_new_crtc_state->stream->abm_level = dm_new_crtc_state->abm_level;
+
+ stream_update.abm_level = &dm_new_crtc_state->abm_level;
+ }
+
+ if (hdr_changed) {
+ fill_hdr_info_packet(new_con_state, &hdr_packet); + stream_update.hdr_static_metadata = &hdr_packet; + } + + status = dc_stream_get_status(dm_new_crtc_state->stream); + + if (WARN_ON(!status)) + continue; + + WARN_ON(!status->plane_count); + + /* + * TODO: DC refuses to perform stream updates without a dc_surface_update. + * Here we create an empty update on each plane. + * To fix this, DC should permit updating only stream properties. + */ + dummy_updates = kzalloc(sizeof(struct dc_surface_update) * MAX_SURFACES, GFP_ATOMIC); + for (j = 0; j < status->plane_count; j++) + dummy_updates[j].surface = status->plane_states[0]; + + + mutex_lock(&dm->dc_lock); + dc_update_planes_and_stream(dm->dc, + dummy_updates, + status->plane_count, + dm_new_crtc_state->stream, + &stream_update); + mutex_unlock(&dm->dc_lock); + kfree(dummy_updates); + } + + /** + * Enable interrupts for CRTCs that are newly enabled or went through + * a modeset. It was intentionally deferred until after the front end + * state was modified to wait until the OTG was on and so the IRQ + * handlers didn't access stale or invalid state. + */ + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); +#ifdef CONFIG_DEBUG_FS + enum amdgpu_dm_pipe_crc_source cur_crc_src; +#endif + /* Count number of newly disabled CRTCs for dropping PM refs later. */ + if (old_crtc_state->active && !new_crtc_state->active) + crtc_disable_count++; + + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + + /* For freesync config update on crtc state and params for irq */ + update_stream_irq_parameters(dm, dm_new_crtc_state); + +#ifdef CONFIG_DEBUG_FS + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); + cur_crc_src = acrtc->dm_irq_params.crc_src; + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); +#endif + + if (new_crtc_state->active && + (!old_crtc_state->active || + drm_atomic_crtc_needs_modeset(new_crtc_state))) { + dc_stream_retain(dm_new_crtc_state->stream); + acrtc->dm_irq_params.stream = dm_new_crtc_state->stream; + manage_dm_interrupts(adev, acrtc, true); + } + /* Handle vrr on->off / off->on transitions */ + amdgpu_dm_handle_vrr_transition(dm_old_crtc_state, dm_new_crtc_state); + +#ifdef CONFIG_DEBUG_FS + if (new_crtc_state->active && + (!old_crtc_state->active || + drm_atomic_crtc_needs_modeset(new_crtc_state))) { + /** + * Frontend may have changed so reapply the CRC capture + * settings for the stream. + */ + if (amdgpu_dm_is_valid_crc_source(cur_crc_src)) { +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + if (amdgpu_dm_crc_window_is_activated(crtc)) { + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); + acrtc->dm_irq_params.window_param.update_win = true; + + /** + * It takes 2 frames for HW to stably generate CRC when + * resuming from suspend, so we set skip_frame_cnt 2. 
+ */ + acrtc->dm_irq_params.window_param.skip_frame_cnt = 2; + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); + } +#endif + if (amdgpu_dm_crtc_configure_crc_source( + crtc, dm_new_crtc_state, cur_crc_src)) + DRM_DEBUG_DRIVER("Failed to configure crc source"); + } + } +#endif + } + + for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) + if (new_crtc_state->async_flip) + wait_for_vblank = false; + + /* update planes when needed per crtc*/ + for_each_new_crtc_in_state(state, crtc, new_crtc_state, j) { + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + + if (dm_new_crtc_state->stream) + amdgpu_dm_commit_planes(state, dev, dm, crtc, wait_for_vblank); + } + + /* Update audio instances for each connector. */ + amdgpu_dm_commit_audio(dev, state); + + /* restore the backlight level */ + for (i = 0; i < dm->num_of_edps; i++) { + if (dm->backlight_dev[i] && + (dm->actual_brightness[i] != dm->brightness[i])) + amdgpu_dm_backlight_set_level(dm, i, dm->brightness[i]); + } + + /* + * send vblank event on all events not handled in flip and + * mark consumed event for drm_atomic_helper_commit_hw_done + */ + spin_lock_irqsave(&adev_to_drm(adev)->event_lock, flags); + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + + if (new_crtc_state->event) + drm_send_event_locked(dev, &new_crtc_state->event->base); + + new_crtc_state->event = NULL; + } + spin_unlock_irqrestore(&adev_to_drm(adev)->event_lock, flags); + + /* Signal HW programming completion */ + drm_atomic_helper_commit_hw_done(state); + + if (wait_for_vblank) + drm_atomic_helper_wait_for_flip_done(dev, state); + + drm_atomic_helper_cleanup_planes(dev, state); + + /* Don't free the memory if we are hitting this as part of suspend. + * This way we don't free any memory during suspend; see + * amdgpu_bo_free_kernel(). The memory will be freed in the first + * non-suspend modeset or when the driver is torn down. 
+ */
+ if (!adev->in_suspend) {
+ /* return the stolen vga memory back to VRAM */
+ if (!adev->mman.keep_stolen_vga_memory)
+ amdgpu_bo_free_kernel(&adev->mman.stolen_vga_memory, NULL, NULL);
+ amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
+ }
+
+ /*
+ * Finally, drop a runtime PM reference for each newly disabled CRTC,
+ * so we can put the GPU into runtime suspend if we're not driving any
+ * displays anymore
+ */
+ for (i = 0; i < crtc_disable_count; i++)
+ pm_runtime_put_autosuspend(dev->dev);
+ pm_runtime_mark_last_busy(dev->dev);
+}
+
+static int dm_force_atomic_commit(struct drm_connector *connector)
+{
+ int ret = 0;
+ struct drm_device *ddev = connector->dev;
+ struct drm_atomic_state *state = drm_atomic_state_alloc(ddev);
+ struct amdgpu_crtc *disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ struct drm_plane *plane = disconnected_acrtc->base.primary;
+ struct drm_connector_state *conn_state;
+ struct drm_crtc_state *crtc_state;
+ struct drm_plane_state *plane_state;
+
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = ddev->mode_config.acquire_ctx;
+
+ /* Construct an atomic state to restore the previous display setting */
+
+ /*
+ * Attach connectors to drm_atomic_state
+ */
+ conn_state = drm_atomic_get_connector_state(state, connector);
+
+ ret = PTR_ERR_OR_ZERO(conn_state);
+ if (ret)
+ goto out;
+
+ /* Attach crtc to drm_atomic_state */
+ crtc_state = drm_atomic_get_crtc_state(state, &disconnected_acrtc->base);
+
+ ret = PTR_ERR_OR_ZERO(crtc_state);
+ if (ret)
+ goto out;
+
+ /* force a restore */
+ crtc_state->mode_changed = true;
+
+ /* Attach plane to drm_atomic_state */
+ plane_state = drm_atomic_get_plane_state(state, plane);
+
+ ret = PTR_ERR_OR_ZERO(plane_state);
+ if (ret)
+ goto out;
+
+ /* Call commit internally with the state we just constructed */
+ ret = drm_atomic_commit(state);
+
+out:
+ drm_atomic_state_put(state);
+ if (ret)
+ DRM_ERROR("Restoring old state failed with %i\n", ret);
+
+ return ret;
+}
+
+/*
+ * This function handles all cases when a set mode does not come upon hotplug.
+ * This includes when a display is unplugged then plugged back into the
+ * same port and when running without usermode desktop manager support.
+ */
+void dm_restore_drm_connector_state(struct drm_device *dev,
+ struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+ struct amdgpu_crtc *disconnected_acrtc;
+ struct dm_crtc_state *acrtc_state;
+
+ if (!aconnector->dc_sink || !connector->state || !connector->encoder)
+ return;
+
+ disconnected_acrtc = to_amdgpu_crtc(connector->encoder->crtc);
+ if (!disconnected_acrtc)
+ return;
+
+ acrtc_state = to_dm_crtc_state(disconnected_acrtc->base.state);
+ if (!acrtc_state->stream)
+ return;
+
+ /*
+ * If the previous sink is not released and different from the current,
+ * we deduce we are in a state where we cannot rely on a usermode call
+ * to turn on the display, so we do it here
+ */
+ if (acrtc_state->stream->sink != aconnector->dc_sink)
+ dm_force_atomic_commit(&aconnector->base);
+}
+
+/*
+ * Grabs all modesetting locks to serialize against any blocking commits,
+ * and waits for completion of all non-blocking commits.
+ */
+static int do_aquire_global_lock(struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_commit *commit;
+ long ret;
+
+ /*
+ * Adding all modeset locks to the acquire_ctx ensures that when the
+ * framework releases them, the extra locks we take here will be
+ * released too.
+ */
+ ret = drm_modeset_lock_all_ctx(dev, state->acquire_ctx);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ spin_lock(&crtc->commit_lock);
+ commit = list_first_entry_or_null(&crtc->commit_list,
+ struct drm_crtc_commit, commit_entry);
+ if (commit)
+ drm_crtc_commit_get(commit);
+ spin_unlock(&crtc->commit_lock);
+
+ if (!commit)
+ continue;
+
+ /*
+ * Make sure all pending HW programming completed and
+ * page flips done
+ */
+ ret = wait_for_completion_interruptible_timeout(&commit->hw_done, 10*HZ);
+
+ if (ret > 0)
+ ret = wait_for_completion_interruptible_timeout(
+ &commit->flip_done, 10*HZ);
+
+ if (ret == 0)
+ DRM_ERROR("[CRTC:%d:%s] hw_done or flip_done timed out\n",
+ crtc->base.id, crtc->name);
+
+ drm_crtc_commit_put(commit);
+ }
+
+ return ret < 0 ? ret : 0;
+}
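+
+/*
+ * Illustrative sketch, not part of the driver: the FreeSync code below
+ * works in micro-Hertz, scaling whole-Hz refresh rates by 1,000,000
+ * (e.g. 48 Hz -> 48,000,000 uHz). A mode's nominal refresh rate in uHz
+ * can be derived from its pixel clock and totals, the same computation
+ * set_freesync_fixed_config() performs further down. The helper name is
+ * hypothetical; it assumes only struct drm_display_mode and div_u64(),
+ * both already used in this file.
+ */
+static inline u64 mode_refresh_in_uhz(const struct drm_display_mode *mode)
+{
+ /* mode->clock is in kHz: kHz * 1000 * 1,000,000 / (htotal * vtotal) */
+ u64 num = (u64)mode->clock * 1000 * 1000000;
+ u64 den = (u64)mode->htotal * mode->vtotal;
+
+ return div_u64(num, den);
+}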
+
+static void get_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state,
+ struct dm_connector_state *new_con_state)
+{
+ struct mod_freesync_config config = {0};
+ struct amdgpu_dm_connector *aconnector =
+ to_amdgpu_dm_connector(new_con_state->base.connector);
+ struct drm_display_mode *mode = &new_crtc_state->base.mode;
+ int vrefresh = drm_mode_vrefresh(mode);
+ bool fs_vid_mode = false;
+
+ new_crtc_state->vrr_supported = new_con_state->freesync_capable &&
+ vrefresh >= aconnector->min_vfreq &&
+ vrefresh <= aconnector->max_vfreq;
+
+ if (new_crtc_state->vrr_supported) {
+ new_crtc_state->stream->ignore_msa_timing_param = true;
+ fs_vid_mode = new_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED;
+
+ config.min_refresh_in_uhz = aconnector->min_vfreq * 1000000;
+ config.max_refresh_in_uhz = aconnector->max_vfreq * 1000000;
+ config.vsif_supported = true;
+ config.btr = true;
+
+ if (fs_vid_mode) {
+ config.state = VRR_STATE_ACTIVE_FIXED;
+ config.fixed_refresh_in_uhz = new_crtc_state->freesync_config.fixed_refresh_in_uhz;
+ goto out;
+ } else if (new_crtc_state->base.vrr_enabled) {
+ config.state = VRR_STATE_ACTIVE_VARIABLE;
+ } else {
+ config.state = VRR_STATE_INACTIVE;
+ }
+ }
+out:
+ new_crtc_state->freesync_config = config;
+}
+
+static void reset_freesync_config_for_crtc(
+ struct dm_crtc_state *new_crtc_state)
+{
+ new_crtc_state->vrr_supported = false;
+
+ memset(&new_crtc_state->vrr_infopacket, 0,
+ sizeof(new_crtc_state->vrr_infopacket));
+}
+
+/*
+ * Timing is considered unchanged for FreeSync purposes when the two modes
+ * differ only in vertical total and vsync position while keeping the same
+ * vsync width, i.e. only the vertical front porch has changed.
+ */
+static bool
+is_timing_unchanged_for_freesync(struct drm_crtc_state *old_crtc_state,
+ struct drm_crtc_state *new_crtc_state)
+{
+ const struct drm_display_mode *old_mode, *new_mode;
+
+ if (!old_crtc_state || !new_crtc_state)
+ return false;
+
+ old_mode = &old_crtc_state->mode;
+ new_mode = &new_crtc_state->mode;
+
+ if (old_mode->clock == new_mode->clock &&
+ old_mode->hdisplay == new_mode->hdisplay &&
+ old_mode->vdisplay == new_mode->vdisplay &&
+ old_mode->htotal == new_mode->htotal &&
+ old_mode->vtotal != new_mode->vtotal &&
+ old_mode->hsync_start == new_mode->hsync_start &&
+ old_mode->vsync_start != new_mode->vsync_start &&
+ old_mode->hsync_end == new_mode->hsync_end &&
+ old_mode->vsync_end != new_mode->vsync_end &&
+ old_mode->hskew == new_mode->hskew &&
+ old_mode->vscan == new_mode->vscan &&
+ (old_mode->vsync_end - old_mode->vsync_start) ==
+ 
(new_mode->vsync_end - new_mode->vsync_start)) + return true; + + return false; +} + +static void set_freesync_fixed_config(struct dm_crtc_state *dm_new_crtc_state) +{ + u64 num, den, res; + struct drm_crtc_state *new_crtc_state = &dm_new_crtc_state->base; + + dm_new_crtc_state->freesync_config.state = VRR_STATE_ACTIVE_FIXED; + + num = (unsigned long long)new_crtc_state->mode.clock * 1000 * 1000000; + den = (unsigned long long)new_crtc_state->mode.htotal * + (unsigned long long)new_crtc_state->mode.vtotal; + + res = div_u64(num, den); + dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = res; +} + +static int dm_update_crtc_state(struct amdgpu_display_manager *dm, + struct drm_atomic_state *state, + struct drm_crtc *crtc, + struct drm_crtc_state *old_crtc_state, + struct drm_crtc_state *new_crtc_state, + bool enable, + bool *lock_and_validation_needed) +{ + struct dm_atomic_state *dm_state = NULL; + struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + struct dc_stream_state *new_stream; + int ret = 0; + + /* + * TODO Move this code into dm_crtc_atomic_check once we get rid of dc_validation_set + * update changed items + */ + struct amdgpu_crtc *acrtc = NULL; + struct amdgpu_dm_connector *aconnector = NULL; + struct drm_connector_state *drm_new_conn_state = NULL, *drm_old_conn_state = NULL; + struct dm_connector_state *dm_new_conn_state = NULL, *dm_old_conn_state = NULL; + + new_stream = NULL; + + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + acrtc = to_amdgpu_crtc(crtc); + aconnector = amdgpu_dm_find_first_crtc_matching_connector(state, crtc); + + /* TODO This hack should go away */ + if (aconnector && enable) { + /* Make sure fake sink is created in plug-in scenario */ + drm_new_conn_state = drm_atomic_get_new_connector_state(state, + &aconnector->base); + drm_old_conn_state = drm_atomic_get_old_connector_state(state, + &aconnector->base); + + if (IS_ERR(drm_new_conn_state)) { + ret = PTR_ERR_OR_ZERO(drm_new_conn_state); + goto fail; + } + + dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); + dm_old_conn_state = to_dm_connector_state(drm_old_conn_state); + + if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) + goto skip_modeset; + + new_stream = create_validate_stream_for_sink(aconnector, + &new_crtc_state->mode, + dm_new_conn_state, + dm_old_crtc_state->stream); + + /* + * we can have no stream on ACTION_SET if a display + * was disconnected during S3, in this case it is not an + * error, the OS will be updated after detection, and + * will do the right thing on next atomic commit + */ + + if (!new_stream) { + DRM_DEBUG_DRIVER("%s: Failed to create new stream for crtc %d\n", + __func__, acrtc->base.base.id); + ret = -ENOMEM; + goto fail; + } + + /* + * TODO: Check VSDB bits to decide whether this should + * be enabled or not. + */ + new_stream->triggered_crtc_reset.enabled = + dm->force_timing_sync; + + dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level; + + ret = fill_hdr_info_packet(drm_new_conn_state, + &new_stream->hdr_static_metadata); + if (ret) + goto fail; + + /* + * If we already removed the old stream from the context + * (and set the new stream to NULL) then we can't reuse + * the old stream even if the stream and scaling are unchanged. + * We'll hit the BUG_ON and black screen. + * + * TODO: Refactor this function to allow this check to work + * in all conditions. 
+ */ + if (dm_new_crtc_state->stream && + is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) + goto skip_modeset; + + if (dm_new_crtc_state->stream && + dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && + dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream)) { + new_crtc_state->mode_changed = false; + DRM_DEBUG_DRIVER("Mode change not required, setting mode_changed to %d", + new_crtc_state->mode_changed); + } + } + + /* mode_changed flag may get updated above, need to check again */ + if (!drm_atomic_crtc_needs_modeset(new_crtc_state)) + goto skip_modeset; + + drm_dbg_state(state->dev, + "amdgpu_crtc id:%d crtc_state_flags: enable:%d, active:%d, planes_changed:%d, mode_changed:%d,active_changed:%d,connectors_changed:%d\n", + acrtc->crtc_id, + new_crtc_state->enable, + new_crtc_state->active, + new_crtc_state->planes_changed, + new_crtc_state->mode_changed, + new_crtc_state->active_changed, + new_crtc_state->connectors_changed); + + /* Remove stream for any changed/disabled CRTC */ + if (!enable) { + + if (!dm_old_crtc_state->stream) + goto skip_modeset; + + /* Unset freesync video if it was active before */ + if (dm_old_crtc_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED) { + dm_new_crtc_state->freesync_config.state = VRR_STATE_INACTIVE; + dm_new_crtc_state->freesync_config.fixed_refresh_in_uhz = 0; + } + + /* Now check if we should set freesync video mode */ + if (dm_new_crtc_state->stream && + dc_is_stream_unchanged(new_stream, dm_old_crtc_state->stream) && + dc_is_stream_scaling_unchanged(new_stream, dm_old_crtc_state->stream) && + is_timing_unchanged_for_freesync(new_crtc_state, + old_crtc_state)) { + new_crtc_state->mode_changed = false; + DRM_DEBUG_DRIVER( + "Mode change not required for front porch change, setting mode_changed to %d", + new_crtc_state->mode_changed); + + set_freesync_fixed_config(dm_new_crtc_state); + + goto skip_modeset; + } else if (aconnector && + is_freesync_video_mode(&new_crtc_state->mode, + aconnector)) { + struct drm_display_mode *high_mode; + + high_mode = get_highest_refresh_rate_mode(aconnector, false); + if (!drm_mode_equal(&new_crtc_state->mode, high_mode)) + set_freesync_fixed_config(dm_new_crtc_state); + } + + ret = dm_atomic_get_state(state, &dm_state); + if (ret) + goto fail; + + DRM_DEBUG_DRIVER("Disabling DRM crtc: %d\n", + crtc->base.id); + + /* i.e. 
reset mode */
+ if (dc_remove_stream_from_ctx(
+ dm->dc,
+ dm_state->context,
+ dm_old_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ dc_stream_release(dm_old_crtc_state->stream);
+ dm_new_crtc_state->stream = NULL;
+
+ reset_freesync_config_for_crtc(dm_new_crtc_state);
+
+ *lock_and_validation_needed = true;
+
+ } else { /* Add stream for any updated/enabled CRTC */
+ /*
+ * Quick fix to prevent a NULL pointer dereference on
+ * new_stream when MST connectors are added but not found
+ * in the existing crtc_state (chained mode).
+ * TODO: dig out the root cause of that.
+ */
+ if (!aconnector)
+ goto skip_modeset;
+
+ if (modereset_required(new_crtc_state))
+ goto skip_modeset;
+
+ if (amdgpu_dm_crtc_modeset_required(new_crtc_state, new_stream,
+ dm_old_crtc_state->stream)) {
+
+ WARN_ON(dm_new_crtc_state->stream);
+
+ ret = dm_atomic_get_state(state, &dm_state);
+ if (ret)
+ goto fail;
+
+ dm_new_crtc_state->stream = new_stream;
+
+ dc_stream_retain(new_stream);
+
+ DRM_DEBUG_ATOMIC("Enabling DRM crtc: %d\n",
+ crtc->base.id);
+
+ if (dc_add_stream_to_ctx(
+ dm->dc,
+ dm_state->context,
+ dm_new_crtc_state->stream) != DC_OK) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ *lock_and_validation_needed = true;
+ }
+ }
+
+skip_modeset:
+ /* Release extra reference */
+ if (new_stream)
+ dc_stream_release(new_stream);
+
+ /*
+ * We want to do dc stream updates that do not require a
+ * full modeset below.
+ */
+ if (!(enable && aconnector && new_crtc_state->active))
+ return 0;
+ /*
+ * Given above conditions, the dc state cannot be NULL because:
+ * 1. We're in the process of enabling CRTCs (just been added
+ * to the dc context, or already is on the context)
+ * 2. Has a valid connector attached, and
+ * 3. Is currently active and enabled.
+ * => The dc stream state currently exists.
+ */
+ BUG_ON(dm_new_crtc_state->stream == NULL);
+
+ /* Scaling or underscan settings */
+ if (is_scaling_state_different(dm_old_conn_state, dm_new_conn_state) ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state))
+ update_stream_scaling_settings(
+ &new_crtc_state->mode, dm_new_conn_state, dm_new_crtc_state->stream);
+
+ /* ABM settings */
+ dm_new_crtc_state->abm_level = dm_new_conn_state->abm_level;
+
+ /*
+ * Color management settings. We also update color properties
+ * when a modeset is needed, to ensure it gets reprogrammed.
+ */
+ if (dm_new_crtc_state->base.color_mgmt_changed ||
+ drm_atomic_crtc_needs_modeset(new_crtc_state)) {
+ ret = amdgpu_dm_update_crtc_color_mgmt(dm_new_crtc_state);
+ if (ret)
+ goto fail;
+ }
+
+ /* Update Freesync settings. */
+ get_freesync_config_for_crtc(dm_new_crtc_state,
+ dm_new_conn_state);
+
+ return ret;
+
+fail:
+ if (new_stream)
+ dc_stream_release(new_stream);
+ return ret;
+}
+
+static bool should_reset_plane(struct drm_atomic_state *state,
+ struct drm_plane *plane,
+ struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state)
+{
+ struct drm_plane *other;
+ struct drm_plane_state *old_other_state, *new_other_state;
+ struct drm_crtc_state *new_crtc_state;
+ struct amdgpu_device *adev = drm_to_adev(plane->dev);
+ int i;
+
+ /*
+ * TODO: Remove this hack for all ASICs once it is proven that
+ * fast updates work fine on DCN3.2+.
+ */
+ if (adev->ip_versions[DCE_HWIP][0] < IP_VERSION(3, 2, 0) && state->allow_modeset)
+ return true;
+
+ /* Exit early if we know that we're adding or removing the plane. */
+ if (old_plane_state->crtc != new_plane_state->crtc)
+ return true;
+
+ /* old crtc == new_crtc == NULL, plane not in context.
*/ + if (!new_plane_state->crtc) + return false; + + new_crtc_state = + drm_atomic_get_new_crtc_state(state, new_plane_state->crtc); + + if (!new_crtc_state) + return true; + + /* CRTC Degamma changes currently require us to recreate planes. */ + if (new_crtc_state->color_mgmt_changed) + return true; + + if (drm_atomic_crtc_needs_modeset(new_crtc_state)) + return true; + + /* + * If there are any new primary or overlay planes being added or + * removed then the z-order can potentially change. To ensure + * correct z-order and pipe acquisition the current DC architecture + * requires us to remove and recreate all existing planes. + * + * TODO: Come up with a more elegant solution for this. + */ + for_each_oldnew_plane_in_state(state, other, old_other_state, new_other_state, i) { + struct amdgpu_framebuffer *old_afb, *new_afb; + + if (other->type == DRM_PLANE_TYPE_CURSOR) + continue; + + if (old_other_state->crtc != new_plane_state->crtc && + new_other_state->crtc != new_plane_state->crtc) + continue; + + if (old_other_state->crtc != new_other_state->crtc) + return true; + + /* Src/dst size and scaling updates. */ + if (old_other_state->src_w != new_other_state->src_w || + old_other_state->src_h != new_other_state->src_h || + old_other_state->crtc_w != new_other_state->crtc_w || + old_other_state->crtc_h != new_other_state->crtc_h) + return true; + + /* Rotation / mirroring updates. */ + if (old_other_state->rotation != new_other_state->rotation) + return true; + + /* Blending updates. */ + if (old_other_state->pixel_blend_mode != + new_other_state->pixel_blend_mode) + return true; + + /* Alpha updates. */ + if (old_other_state->alpha != new_other_state->alpha) + return true; + + /* Colorspace changes. */ + if (old_other_state->color_range != new_other_state->color_range || + old_other_state->color_encoding != new_other_state->color_encoding) + return true; + + /* Framebuffer checks fall at the end. */ + if (!old_other_state->fb || !new_other_state->fb) + continue; + + /* Pixel format changes can require bandwidth updates. */ + if (old_other_state->fb->format != new_other_state->fb->format) + return true; + + old_afb = (struct amdgpu_framebuffer *)old_other_state->fb; + new_afb = (struct amdgpu_framebuffer *)new_other_state->fb; + + /* Tiling and DCC changes also require bandwidth updates. 
*/ + if (old_afb->tiling_flags != new_afb->tiling_flags || + old_afb->base.modifier != new_afb->base.modifier) + return true; + } + + return false; +} + +static int dm_check_cursor_fb(struct amdgpu_crtc *new_acrtc, + struct drm_plane_state *new_plane_state, + struct drm_framebuffer *fb) +{ + struct amdgpu_device *adev = drm_to_adev(new_acrtc->base.dev); + struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(fb); + unsigned int pitch; + bool linear; + + if (fb->width > new_acrtc->max_cursor_width || + fb->height > new_acrtc->max_cursor_height) { + DRM_DEBUG_ATOMIC("Bad cursor FB size %dx%d\n", + new_plane_state->fb->width, + new_plane_state->fb->height); + return -EINVAL; + } + if (new_plane_state->src_w != fb->width << 16 || + new_plane_state->src_h != fb->height << 16) { + DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); + return -EINVAL; + } + + /* Pitch in pixels */ + pitch = fb->pitches[0] / fb->format->cpp[0]; + + if (fb->width != pitch) { + DRM_DEBUG_ATOMIC("Cursor FB width %d doesn't match pitch %d", + fb->width, pitch); + return -EINVAL; + } + + switch (pitch) { + case 64: + case 128: + case 256: + /* FB pitch is supported by cursor plane */ + break; + default: + DRM_DEBUG_ATOMIC("Bad cursor FB pitch %d px\n", pitch); + return -EINVAL; + } + + /* Core DRM takes care of checking FB modifiers, so we only need to + * check tiling flags when the FB doesn't have a modifier. + */ + if (!(fb->flags & DRM_MODE_FB_MODIFIERS)) { + if (adev->family < AMDGPU_FAMILY_AI) { + linear = AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_2D_TILED_THIN1 && + AMDGPU_TILING_GET(afb->tiling_flags, ARRAY_MODE) != DC_ARRAY_1D_TILED_THIN1 && + AMDGPU_TILING_GET(afb->tiling_flags, MICRO_TILE_MODE) == 0; + } else { + linear = AMDGPU_TILING_GET(afb->tiling_flags, SWIZZLE_MODE) == 0; + } + if (!linear) { + DRM_DEBUG_ATOMIC("Cursor FB not linear"); + return -EINVAL; + } + } + + return 0; +} + +static int dm_update_plane_state(struct dc *dc, + struct drm_atomic_state *state, + struct drm_plane *plane, + struct drm_plane_state *old_plane_state, + struct drm_plane_state *new_plane_state, + bool enable, + bool *lock_and_validation_needed, + bool *is_top_most_overlay) +{ + + struct dm_atomic_state *dm_state = NULL; + struct drm_crtc *new_plane_crtc, *old_plane_crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct dm_crtc_state *dm_new_crtc_state, *dm_old_crtc_state; + struct dm_plane_state *dm_new_plane_state, *dm_old_plane_state; + struct amdgpu_crtc *new_acrtc; + bool needs_reset; + int ret = 0; + + + new_plane_crtc = new_plane_state->crtc; + old_plane_crtc = old_plane_state->crtc; + dm_new_plane_state = to_dm_plane_state(new_plane_state); + dm_old_plane_state = to_dm_plane_state(old_plane_state); + + if (plane->type == DRM_PLANE_TYPE_CURSOR) { + if (!enable || !new_plane_crtc || + drm_atomic_plane_disabling(plane->state, new_plane_state)) + return 0; + + new_acrtc = to_amdgpu_crtc(new_plane_crtc); + + if (new_plane_state->src_x != 0 || new_plane_state->src_y != 0) { + DRM_DEBUG_ATOMIC("Cropping not supported for cursor plane\n"); + return -EINVAL; + } + + if (new_plane_state->fb) { + ret = dm_check_cursor_fb(new_acrtc, new_plane_state, + new_plane_state->fb); + if (ret) + return ret; + } + + return 0; + } + + needs_reset = should_reset_plane(state, plane, old_plane_state, + new_plane_state); + + /* Remove any changed/removed planes */ + if (!enable) { + if (!needs_reset) + return 0; + + if (!old_plane_crtc) + return 0; + + old_crtc_state = 
drm_atomic_get_old_crtc_state( + state, old_plane_crtc); + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + + if (!dm_old_crtc_state->stream) + return 0; + + DRM_DEBUG_ATOMIC("Disabling DRM plane: %d on DRM crtc %d\n", + plane->base.id, old_plane_crtc->base.id); + + ret = dm_atomic_get_state(state, &dm_state); + if (ret) + return ret; + + if (!dc_remove_plane_from_context( + dc, + dm_old_crtc_state->stream, + dm_old_plane_state->dc_state, + dm_state->context)) { + return -EINVAL; + } + + if (dm_old_plane_state->dc_state) + dc_plane_state_release(dm_old_plane_state->dc_state); + + dm_new_plane_state->dc_state = NULL; + + *lock_and_validation_needed = true; + + } else { /* Add new planes */ + struct dc_plane_state *dc_new_plane_state; + + if (drm_atomic_plane_disabling(plane->state, new_plane_state)) + return 0; + + if (!new_plane_crtc) + return 0; + + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_plane_crtc); + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + + if (!dm_new_crtc_state->stream) + return 0; + + if (!needs_reset) + return 0; + + ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state); + if (ret) + return ret; + + WARN_ON(dm_new_plane_state->dc_state); + + dc_new_plane_state = dc_create_plane_state(dc); + if (!dc_new_plane_state) + return -ENOMEM; + + /* Block topmost plane from being a video plane */ + if (plane->type == DRM_PLANE_TYPE_OVERLAY) { + if (is_video_format(new_plane_state->fb->format->format) && *is_top_most_overlay) { + /* dc_new_plane_state is not yet attached to the atomic state, release it */ + dc_plane_state_release(dc_new_plane_state); + return -EINVAL; + } + + *is_top_most_overlay = false; + } + + DRM_DEBUG_ATOMIC("Enabling DRM plane: %d on DRM crtc %d\n", + plane->base.id, new_plane_crtc->base.id); + + ret = fill_dc_plane_attributes( + drm_to_adev(new_plane_crtc->dev), + dc_new_plane_state, + new_plane_state, + new_crtc_state); + if (ret) { + dc_plane_state_release(dc_new_plane_state); + return ret; + } + + ret = dm_atomic_get_state(state, &dm_state); + if (ret) { + dc_plane_state_release(dc_new_plane_state); + return ret; + } + + /* + * Any atomic check errors that occur after this will + * not need a release. The plane state will be attached + * to the stream, and therefore part of the atomic + * state. It'll be released when the atomic state is + * cleaned. + */ + if (!dc_add_plane_to_context( + dc, + dm_new_crtc_state->stream, + dc_new_plane_state, + dm_state->context)) { + dc_plane_state_release(dc_new_plane_state); + return -EINVAL; + } + + dm_new_plane_state->dc_state = dc_new_plane_state; + + dm_new_crtc_state->mpo_requested |= (plane->type == DRM_PLANE_TYPE_OVERLAY); + + /* Tell DC to do a full surface update every time there + * is a plane change. Inefficient, but works for now.
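+ * A finer-grained approach would only raise the specific update_flags + * bits that changed, avoiding full-surface reprogramming on every commit.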
+ */ + dm_new_plane_state->dc_state->update_flags.bits.full_update = 1; + + *lock_and_validation_needed = true; + } + + + return ret; +} + +static void dm_get_oriented_plane_size(struct drm_plane_state *plane_state, + int *src_w, int *src_h) +{ + switch (plane_state->rotation & DRM_MODE_ROTATE_MASK) { + case DRM_MODE_ROTATE_90: + case DRM_MODE_ROTATE_270: + *src_w = plane_state->src_h >> 16; + *src_h = plane_state->src_w >> 16; + break; + case DRM_MODE_ROTATE_0: + case DRM_MODE_ROTATE_180: + default: + *src_w = plane_state->src_w >> 16; + *src_h = plane_state->src_h >> 16; + break; + } +} + +static void +dm_get_plane_scale(struct drm_plane_state *plane_state, + int *out_plane_scale_w, int *out_plane_scale_h) +{ + int plane_src_w, plane_src_h; + + dm_get_oriented_plane_size(plane_state, &plane_src_w, &plane_src_h); + *out_plane_scale_w = plane_state->crtc_w * 1000 / plane_src_w; + *out_plane_scale_h = plane_state->crtc_h * 1000 / plane_src_h; +} + +static int dm_check_crtc_cursor(struct drm_atomic_state *state, + struct drm_crtc *crtc, + struct drm_crtc_state *new_crtc_state) +{ + struct drm_plane *cursor = crtc->cursor, *plane, *underlying; + struct drm_plane_state *old_plane_state, *new_plane_state; + struct drm_plane_state *new_cursor_state, *new_underlying_state; + int i; + int cursor_scale_w, cursor_scale_h, underlying_scale_w, underlying_scale_h; + bool any_relevant_change = false; + + /* On DCE and DCN there is no dedicated hardware cursor plane. We get a + * cursor per pipe but it's going to inherit the scaling and + * positioning from the underlying pipe. Check the cursor plane's + * blending properties match the underlying planes'. + */ + + /* If no plane was enabled or changed scaling, no need to check again */ + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + int new_scale_w, new_scale_h, old_scale_w, old_scale_h; + + if (!new_plane_state || !new_plane_state->fb || new_plane_state->crtc != crtc) + continue; + + if (!old_plane_state || !old_plane_state->fb || old_plane_state->crtc != crtc) { + any_relevant_change = true; + break; + } + + if (new_plane_state->fb == old_plane_state->fb && + new_plane_state->crtc_w == old_plane_state->crtc_w && + new_plane_state->crtc_h == old_plane_state->crtc_h) + continue; + + dm_get_plane_scale(new_plane_state, &new_scale_w, &new_scale_h); + dm_get_plane_scale(old_plane_state, &old_scale_w, &old_scale_h); + + if (new_scale_w != old_scale_w || new_scale_h != old_scale_h) { + any_relevant_change = true; + break; + } + } + + if (!any_relevant_change) + return 0; + + new_cursor_state = drm_atomic_get_plane_state(state, cursor); + if (IS_ERR(new_cursor_state)) + return PTR_ERR(new_cursor_state); + + if (!new_cursor_state->fb) + return 0; + + dm_get_plane_scale(new_cursor_state, &cursor_scale_w, &cursor_scale_h); + + /* Need to check all enabled planes, even if this commit doesn't change + * their state + */ + i = drm_atomic_add_affected_planes(state, crtc); + if (i) + return i; + + for_each_new_plane_in_state_reverse(state, underlying, new_underlying_state, i) { + /* Narrow down to non-cursor planes on the same CRTC as the cursor */ + if (new_underlying_state->crtc != crtc || underlying == crtc->cursor) + continue; + + /* Ignore disabled planes */ + if (!new_underlying_state->fb) + continue; + + dm_get_plane_scale(new_underlying_state, + &underlying_scale_w, &underlying_scale_h); + + if (cursor_scale_w != underlying_scale_w || + cursor_scale_h != underlying_scale_h) { + drm_dbg_atomic(crtc->dev, + "Cursor 
[PLANE:%d:%s] scaling doesn't match underlying [PLANE:%d:%s]\n", + cursor->base.id, cursor->name, underlying->base.id, underlying->name); + return -EINVAL; + } + + /* If this plane covers the whole CRTC, no need to check planes underneath */ + if (new_underlying_state->crtc_x <= 0 && + new_underlying_state->crtc_y <= 0 && + new_underlying_state->crtc_x + new_underlying_state->crtc_w >= new_crtc_state->mode.hdisplay && + new_underlying_state->crtc_y + new_underlying_state->crtc_h >= new_crtc_state->mode.vdisplay) + break; + } + + return 0; +} + +static int add_affected_mst_dsc_crtcs(struct drm_atomic_state *state, struct drm_crtc *crtc) +{ + struct drm_connector *connector; + struct drm_connector_state *conn_state, *old_conn_state; + struct amdgpu_dm_connector *aconnector = NULL; + int i; + + for_each_oldnew_connector_in_state(state, connector, old_conn_state, conn_state, i) { + if (!conn_state->crtc) + conn_state = old_conn_state; + + if (conn_state->crtc != crtc) + continue; + + aconnector = to_amdgpu_dm_connector(connector); + if (!aconnector->mst_output_port || !aconnector->mst_root) + aconnector = NULL; + else + break; + } + + if (!aconnector) + return 0; + + return drm_dp_mst_add_affected_dsc_crtcs(state, &aconnector->mst_root->mst_mgr); +} + +/** + * amdgpu_dm_atomic_check() - Atomic check implementation for amdgpu DM. + * + * @dev: The DRM device + * @state: The atomic state to commit + * + * Validate that the given atomic state is programmable by DC into hardware. + * This involves constructing a &struct dc_state reflecting the new hardware + * state we wish to commit, then querying DC to see if it is programmable. It's + * important not to modify the existing DC state. Otherwise, atomic_check + * may unexpectedly commit hardware changes. + * + * When validating the DC state, it's important that the right locks are + * acquired. For the full update case, which removes/adds/updates streams on + * one CRTC while flipping on another CRTC, acquiring the global lock guarantees + * that any such full update commit will wait for completion of any outstanding + * flip using DRM's synchronization events. + * + * Note that DM adds the affected connectors for all CRTCs in state, when that + * might not seem necessary. This is because DC stream creation requires the + * DC sink, which is tied to the DRM connector state. Cleaning this up should + * be possible but non-trivial - a possible TODO item. + * + * Return: 0 on success, negative error code on validation failure.
+ */ +static int amdgpu_dm_atomic_check(struct drm_device *dev, + struct drm_atomic_state *state) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + struct dm_atomic_state *dm_state = NULL; + struct dc *dc = adev->dm.dc; + struct drm_connector *connector; + struct drm_connector_state *old_con_state, *new_con_state; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + struct drm_plane *plane; + struct drm_plane_state *old_plane_state, *new_plane_state; + enum dc_status status; + int ret, i; + bool lock_and_validation_needed = false; + bool is_top_most_overlay = true; + struct dm_crtc_state *dm_old_crtc_state, *dm_new_crtc_state; + struct drm_dp_mst_topology_mgr *mgr; + struct drm_dp_mst_topology_state *mst_state; + struct dsc_mst_fairness_vars vars[MAX_PIPES]; + + trace_amdgpu_dm_atomic_check_begin(state); + + ret = drm_atomic_helper_check_modeset(dev, state); + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_helper_check_modeset() failed\n"); + goto fail; + } + + /* Check connector changes */ + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + + /* Skip connectors that are disabled or part of modeset already. */ + if (!new_con_state->crtc) + continue; + + new_crtc_state = drm_atomic_get_crtc_state(state, new_con_state->crtc); + if (IS_ERR(new_crtc_state)) { + DRM_DEBUG_DRIVER("drm_atomic_get_crtc_state() failed\n"); + ret = PTR_ERR(new_crtc_state); + goto fail; + } + + if (dm_old_con_state->abm_level != dm_new_con_state->abm_level || + dm_old_con_state->scaling != dm_new_con_state->scaling) + new_crtc_state->connectors_changed = true; + } + + if (dc_resource_is_dsc_encoding_supported(dc)) { + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + if (drm_atomic_crtc_needs_modeset(new_crtc_state)) { + ret = add_affected_mst_dsc_crtcs(state, crtc); + if (ret) { + DRM_DEBUG_DRIVER("add_affected_mst_dsc_crtcs() failed\n"); + goto fail; + } + } + } + } + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + dm_old_crtc_state = to_dm_crtc_state(old_crtc_state); + + if (!drm_atomic_crtc_needs_modeset(new_crtc_state) && + !new_crtc_state->color_mgmt_changed && + old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled && + dm_old_crtc_state->dsc_force_changed == false) + continue; + + ret = amdgpu_dm_verify_lut_sizes(new_crtc_state); + if (ret) { + DRM_DEBUG_DRIVER("amdgpu_dm_verify_lut_sizes() failed\n"); + goto fail; + } + + if (!new_crtc_state->enable) + continue; + + ret = drm_atomic_add_affected_connectors(state, crtc); + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_add_affected_connectors() failed\n"); + goto fail; + } + + ret = drm_atomic_add_affected_planes(state, crtc); + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_add_affected_planes() failed\n"); + goto fail; + } + + if (dm_old_crtc_state->dsc_force_changed) + new_crtc_state->mode_changed = true; + } + + /* + * Add all primary and overlay planes on the CRTC to the state + * whenever a plane is enabled to maintain correct z-ordering + * and to enable fast surface updates. 
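+ * For example, enabling an overlay plane on a CRTC whose primary plane + * is untouched still pulls the primary plane into the atomic state, so + * DC re-evaluates the whole plane stack for that CRTC.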
+ */ + drm_for_each_crtc(crtc, dev) { + bool modified = false; + + for_each_oldnew_plane_in_state(state, plane, old_plane_state, new_plane_state, i) { + if (plane->type == DRM_PLANE_TYPE_CURSOR) + continue; + + if (new_plane_state->crtc == crtc || + old_plane_state->crtc == crtc) { + modified = true; + break; + } + } + + if (!modified) + continue; + + drm_for_each_plane_mask(plane, state->dev, crtc->state->plane_mask) { + if (plane->type == DRM_PLANE_TYPE_CURSOR) + continue; + + new_plane_state = + drm_atomic_get_plane_state(state, plane); + + if (IS_ERR(new_plane_state)) { + ret = PTR_ERR(new_plane_state); + DRM_DEBUG_DRIVER("new_plane_state is BAD\n"); + goto fail; + } + } + } + + /* + * DC consults the zpos (layer_index in DC terminology) to determine the + * hw plane on which to enable the hw cursor (see + * `dcn10_can_pipe_disable_cursor`). By now, all modified planes are in + * atomic state, so call drm helper to normalize zpos. + */ + ret = drm_atomic_normalize_zpos(dev, state); + if (ret) { + drm_dbg(dev, "drm_atomic_normalize_zpos() failed\n"); + goto fail; + } + + /* Remove existing planes if they are modified */ + for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { + if (old_plane_state->fb && new_plane_state->fb && + get_mem_type(old_plane_state->fb) != + get_mem_type(new_plane_state->fb)) + lock_and_validation_needed = true; + + ret = dm_update_plane_state(dc, state, plane, + old_plane_state, + new_plane_state, + false, + &lock_and_validation_needed, + &is_top_most_overlay); + if (ret) { + DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); + goto fail; + } + } + + /* Disable all crtcs which require disable */ + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + ret = dm_update_crtc_state(&adev->dm, state, crtc, + old_crtc_state, + new_crtc_state, + false, + &lock_and_validation_needed); + if (ret) { + DRM_DEBUG_DRIVER("DISABLE: dm_update_crtc_state() failed\n"); + goto fail; + } + } + + /* Enable all crtcs which require enable */ + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + ret = dm_update_crtc_state(&adev->dm, state, crtc, + old_crtc_state, + new_crtc_state, + true, + &lock_and_validation_needed); + if (ret) { + DRM_DEBUG_DRIVER("ENABLE: dm_update_crtc_state() failed\n"); + goto fail; + } + } + + /* Add new/modified planes */ + for_each_oldnew_plane_in_state_reverse(state, plane, old_plane_state, new_plane_state, i) { + ret = dm_update_plane_state(dc, state, plane, + old_plane_state, + new_plane_state, + true, + &lock_and_validation_needed, + &is_top_most_overlay); + if (ret) { + DRM_DEBUG_DRIVER("dm_update_plane_state() failed\n"); + goto fail; + } + } + + if (dc_resource_is_dsc_encoding_supported(dc)) { + ret = pre_validate_dsc(state, &dm_state, vars); + if (ret != 0) + goto fail; + } + + /* Run this here since we want to validate the streams we created */ + ret = drm_atomic_helper_check_planes(dev, state); + if (ret) { + DRM_DEBUG_DRIVER("drm_atomic_helper_check_planes() failed\n"); + goto fail; + } + + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + dm_new_crtc_state = to_dm_crtc_state(new_crtc_state); + if (dm_new_crtc_state->mpo_requested) + DRM_DEBUG_DRIVER("MPO enablement requested on crtc:[%p]\n", crtc); + } + + /* Check cursor plane scaling */ + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + ret = dm_check_crtc_cursor(state, crtc, new_crtc_state); + if (ret) { + DRM_DEBUG_DRIVER("dm_check_crtc_cursor() failed\n"); + goto
fail; + } + } + + if (state->legacy_cursor_update) { + /* + * This is a fast cursor update coming from the plane update + * helper, check if it can be done asynchronously for better + * performance. + */ + state->async_update = + !drm_atomic_helper_async_check(dev, state); + + /* + * Skip the remaining global validation if this is an async + * update. Cursor updates can be done without affecting + * state or bandwidth calcs and this avoids the performance + * penalty of locking the private state object and + * allocating a new dc_state. + */ + if (state->async_update) + return 0; + } + + /* Check scaling and underscan changes */ + /* TODO: Scaling-change validation was removed due to the inability to + * commit a new stream into the context w/o causing a full reset. Need + * to decide how to handle this. + */ + for_each_oldnew_connector_in_state(state, connector, old_con_state, new_con_state, i) { + struct dm_connector_state *dm_old_con_state = to_dm_connector_state(old_con_state); + struct dm_connector_state *dm_new_con_state = to_dm_connector_state(new_con_state); + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(dm_new_con_state->base.crtc); + + /* Skip any modesets/resets */ + if (!acrtc || drm_atomic_crtc_needs_modeset( + drm_atomic_get_new_crtc_state(state, &acrtc->base))) + continue; + + /* Skip anything that is not a scaling or underscan change */ + if (!is_scaling_state_different(dm_new_con_state, dm_old_con_state)) + continue; + + lock_and_validation_needed = true; + } + + /* set the slot info for each mst_state based on the link encoding format */ + for_each_new_mst_mgr_in_state(state, mgr, mst_state, i) { + struct amdgpu_dm_connector *aconnector; + struct drm_connector *connector; + struct drm_connector_list_iter iter; + u8 link_coding_cap; + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { + if (connector->index == mst_state->mgr->conn_base_id) { + aconnector = to_amdgpu_dm_connector(connector); + link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(aconnector->dc_link); + drm_dp_mst_update_slots(mst_state, link_coding_cap); + + break; + } + } + drm_connector_list_iter_end(&iter); + } + + /* + * Streams and planes are reset when there are changes that affect + * bandwidth. Anything that affects bandwidth needs to go through + * DC global validation to ensure that the configuration can be applied + * to hardware. + * + * We have to currently stall out here in atomic_check for outstanding + * commits to finish in this case because our IRQ handlers reference + * DRM state directly - we can end up disabling interrupts too early + * if we don't. + * + * TODO: Remove this stall and drop DM state private objects.
+ */ + if (lock_and_validation_needed) { + ret = dm_atomic_get_state(state, &dm_state); + if (ret) { + DRM_DEBUG_DRIVER("dm_atomic_get_state() failed\n"); + goto fail; + } + + ret = do_aquire_global_lock(dev, state); + if (ret) { + DRM_DEBUG_DRIVER("do_aquire_global_lock() failed\n"); + goto fail; + } + + ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars); + if (ret) { + DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n"); + ret = -EINVAL; + goto fail; + } + + ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars); + if (ret) { + DRM_DEBUG_DRIVER("dm_update_mst_vcpi_slots_for_dsc() failed\n"); + goto fail; + } + + /* + * Perform validation of MST topology in the state: + * We need to perform MST atomic check before calling + * dc_validate_global_state(), or there is a chance + * to get stuck in an infinite loop and hang eventually. + */ + ret = drm_dp_mst_atomic_check(state); + if (ret) { + DRM_DEBUG_DRIVER("drm_dp_mst_atomic_check() failed\n"); + goto fail; + } + status = dc_validate_global_state(dc, dm_state->context, true); + if (status != DC_OK) { + DRM_DEBUG_DRIVER("DC global validation failure: %s (%d)", + dc_status_to_str(status), status); + ret = -EINVAL; + goto fail; + } + } else { + /* + * The commit is a fast update. Fast updates shouldn't change + * the DC context, affect global validation, and can have their + * commit work done in parallel with other commits not touching + * the same resource. If we have a new DC context as part of + * the DM atomic state from validation we need to free it and + * retain the existing one instead. + * + * Furthermore, since the DM atomic state only contains the DC + * context and can safely be annulled, we can free the state + * and clear the associated private object now to free + * some memory and avoid a possible use-after-free later. + */ + + for (i = 0; i < state->num_private_objs; i++) { + struct drm_private_obj *obj = state->private_objs[i].ptr; + + if (obj->funcs == adev->dm.atomic_obj.funcs) { + int j = state->num_private_objs-1; + + dm_atomic_destroy_state(obj, + state->private_objs[i].state); + + /* If i is not at the end of the array then the + * last element needs to be moved to where i was + * before the array can safely be truncated. + */ + if (i != j) + state->private_objs[i] = + state->private_objs[j]; + + state->private_objs[j].ptr = NULL; + state->private_objs[j].state = NULL; + state->private_objs[j].old_state = NULL; + state->private_objs[j].new_state = NULL; + + state->num_private_objs = j; + break; + } + } + } + + /* Store the overall update type for use later in atomic check. */ + for_each_new_crtc_in_state(state, crtc, new_crtc_state, i) { + struct dm_crtc_state *dm_new_crtc_state = + to_dm_crtc_state(new_crtc_state); + + /* + * Only allow async flips for fast updates that don't change + * the FB pitch, the DCC state, rotation, etc. + */ + if (new_crtc_state->async_flip && lock_and_validation_needed) { + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] async flips are only supported for fast updates\n", + crtc->base.id, crtc->name); + ret = -EINVAL; + goto fail; + } + + dm_new_crtc_state->update_type = lock_and_validation_needed ? 
+ UPDATE_TYPE_FULL : UPDATE_TYPE_FAST; + } + + /* Must be success */ + WARN_ON(ret); + + trace_amdgpu_dm_atomic_check_finish(state, ret); + + return ret; + +fail: + if (ret == -EDEADLK) + DRM_DEBUG_DRIVER("Atomic check stopped to avoid deadlock.\n"); + else if (ret == -EINTR || ret == -EAGAIN || ret == -ERESTARTSYS) + DRM_DEBUG_DRIVER("Atomic check stopped due to signal.\n"); + else + DRM_DEBUG_DRIVER("Atomic check failed with err: %d\n", ret); + + trace_amdgpu_dm_atomic_check_finish(state, ret); + + return ret; +} + +static bool is_dp_capable_without_timing_msa(struct dc *dc, + struct amdgpu_dm_connector *amdgpu_dm_connector) +{ + u8 dpcd_data; + bool capable = false; + + if (amdgpu_dm_connector->dc_link && + dm_helpers_dp_read_dpcd( + NULL, + amdgpu_dm_connector->dc_link, + DP_DOWN_STREAM_PORT_COUNT, + &dpcd_data, + sizeof(dpcd_data))) { + capable = (dpcd_data & DP_MSA_TIMING_PAR_IGNORED) ? true : false; + } + + return capable; +} + +static bool dm_edid_parser_send_cea(struct amdgpu_display_manager *dm, + unsigned int offset, + unsigned int total_length, + u8 *data, + unsigned int length, + struct amdgpu_hdmi_vsdb_info *vsdb) +{ + bool res; + union dmub_rb_cmd cmd; + struct dmub_cmd_send_edid_cea *input; + struct dmub_cmd_edid_cea_output *output; + + if (length > DMUB_EDID_CEA_DATA_CHUNK_BYTES) + return false; + + memset(&cmd, 0, sizeof(cmd)); + + input = &cmd.edid_cea.data.input; + + cmd.edid_cea.header.type = DMUB_CMD__EDID_CEA; + cmd.edid_cea.header.sub_type = 0; + cmd.edid_cea.header.payload_bytes = + sizeof(cmd.edid_cea) - sizeof(cmd.edid_cea.header); + input->offset = offset; + input->length = length; + input->cea_total_length = total_length; + memcpy(input->payload, data, length); + + res = dm_execute_dmub_cmd(dm->dc->ctx, &cmd, DM_DMUB_WAIT_TYPE_WAIT_WITH_REPLY); + if (!res) { + DRM_ERROR("EDID CEA parser failed\n"); + return false; + } + + output = &cmd.edid_cea.data.output; + + if (output->type == DMUB_CMD__EDID_CEA_ACK) { + if (!output->ack.success) { + DRM_ERROR("EDID CEA ack failed at offset %d\n", + output->ack.offset); + } + } else if (output->type == DMUB_CMD__EDID_CEA_AMD_VSDB) { + if (!output->amd_vsdb.vsdb_found) + return false; + + vsdb->freesync_supported = output->amd_vsdb.freesync_supported; + vsdb->amd_vsdb_version = output->amd_vsdb.amd_vsdb_version; + vsdb->min_refresh_rate_hz = output->amd_vsdb.min_frame_rate; + vsdb->max_refresh_rate_hz = output->amd_vsdb.max_frame_rate; + } else { + DRM_WARN("Unknown EDID CEA parser results\n"); + return false; + } + + return true; +} + +static bool parse_edid_cea_dmcu(struct amdgpu_display_manager *dm, + u8 *edid_ext, int len, + struct amdgpu_hdmi_vsdb_info *vsdb_info) +{ + int i; + + /* send extension block to DMCU for parsing */ + for (i = 0; i < len; i += 8) { + bool res; + int offset; + + /* send 8 bytes at a time */ + if (!dc_edid_parser_send_cea(dm->dc, i, len, &edid_ext[i], 8)) + return false; + + if (i+8 == len) { + /* EDID block send completed, expect result */ + int version, min_rate, max_rate; + + res = dc_edid_parser_recv_amd_vsdb(dm->dc, &version, &min_rate, &max_rate); + if (res) { + /* amd vsdb found */ + vsdb_info->freesync_supported = 1; + vsdb_info->amd_vsdb_version = version; + vsdb_info->min_refresh_rate_hz = min_rate; + vsdb_info->max_refresh_rate_hz = max_rate; + return true; + } + /* not amd vsdb */ + return false; + } + + /* check for ack */ + res = dc_edid_parser_recv_cea_ack(dm->dc, &offset); + if (!res) + return false; + } + + return false; +} + +static bool parse_edid_cea_dmub(struct
amdgpu_display_manager *dm, + u8 *edid_ext, int len, + struct amdgpu_hdmi_vsdb_info *vsdb_info) +{ + int i; + + /* send extension block to DMUB for parsing */ + for (i = 0; i < len; i += 8) { + /* send 8 bytes at a time */ + if (!dm_edid_parser_send_cea(dm, i, len, &edid_ext[i], 8, vsdb_info)) + return false; + } + + return vsdb_info->freesync_supported; +} + +static bool parse_edid_cea(struct amdgpu_dm_connector *aconnector, + u8 *edid_ext, int len, + struct amdgpu_hdmi_vsdb_info *vsdb_info) +{ + struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); + bool ret; + + mutex_lock(&adev->dm.dc_lock); + if (adev->dm.dmub_srv) + ret = parse_edid_cea_dmub(&adev->dm, edid_ext, len, vsdb_info); + else + ret = parse_edid_cea_dmcu(&adev->dm, edid_ext, len, vsdb_info); + mutex_unlock(&adev->dm.dc_lock); + return ret; +} + +static int parse_amd_vsdb(struct amdgpu_dm_connector *aconnector, + struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) +{ + u8 *edid_ext = NULL; + int i; + int j = 0; + + if (edid == NULL || edid->extensions == 0) + return -ENODEV; + + /* Find DisplayID extension */ + for (i = 0; i < edid->extensions; i++) { + edid_ext = (void *)(edid + (i + 1)); + if (edid_ext[0] == DISPLAYID_EXT) + break; + } + + while (j < EDID_LENGTH) { + struct amd_vsdb_block *amd_vsdb = (struct amd_vsdb_block *)&edid_ext[j]; + unsigned int ieeeId = (amd_vsdb->ieee_id[2] << 16) | (amd_vsdb->ieee_id[1] << 8) | (amd_vsdb->ieee_id[0]); + + if (ieeeId == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID && + amd_vsdb->version == HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3) { + vsdb_info->replay_mode = (amd_vsdb->feature_caps & AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE) ? true : false; + vsdb_info->amd_vsdb_version = HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3; + DRM_DEBUG_KMS("Panel supports Replay Mode: %d\n", vsdb_info->replay_mode); + + return true; + } + j++; + } + + return false; +} + +static int parse_hdmi_amd_vsdb(struct amdgpu_dm_connector *aconnector, + struct edid *edid, struct amdgpu_hdmi_vsdb_info *vsdb_info) +{ + u8 *edid_ext = NULL; + int i; + bool valid_vsdb_found = false; + + /*----- drm_find_cea_extension() -----*/ + /* No EDID or EDID extensions */ + if (edid == NULL || edid->extensions == 0) + return -ENODEV; + + /* Find CEA extension */ + for (i = 0; i < edid->extensions; i++) { + edid_ext = (uint8_t *)edid + EDID_LENGTH * (i + 1); + if (edid_ext[0] == CEA_EXT) + break; + } + + if (i == edid->extensions) + return -ENODEV; + + /*----- cea_db_offsets() -----*/ + if (edid_ext[0] != CEA_EXT) + return -ENODEV; + + valid_vsdb_found = parse_edid_cea(aconnector, edid_ext, EDID_LENGTH, vsdb_info); + + return valid_vsdb_found ? i : -ENODEV; +} + +/** + * amdgpu_dm_update_freesync_caps - Update Freesync capabilities + * + * @connector: Connector to query. + * @edid: EDID from monitor + * + * amdgpu supports FreeSync on DP and HDMI displays, and we need to keep track + * of some of the display information in the internal data struct used by + * amdgpu_dm. This function checks the connector type to decide how to set the + * FreeSync parameters.
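+ * + * For DP/eDP sinks the refresh range is read from the EDID detailed timing + * monitor range descriptor; for HDMI sinks it comes from the AMD + * vendor-specific data block in the CEA extension block.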
+ */ +void amdgpu_dm_update_freesync_caps(struct drm_connector *connector, + struct edid *edid) +{ + int i = 0; + struct detailed_timing *timing; + struct detailed_non_pixel *data; + struct detailed_data_monitor_range *range; + struct amdgpu_dm_connector *amdgpu_dm_connector = + to_amdgpu_dm_connector(connector); + struct dm_connector_state *dm_con_state = NULL; + struct dc_sink *sink; + + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + struct amdgpu_hdmi_vsdb_info vsdb_info = {0}; + bool freesync_capable = false; + enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; + + if (!connector->state) { + DRM_ERROR("%s - Connector has no state", __func__); + goto update; + } + + sink = amdgpu_dm_connector->dc_sink ? + amdgpu_dm_connector->dc_sink : + amdgpu_dm_connector->dc_em_sink; + + if (!edid || !sink) { + dm_con_state = to_dm_connector_state(connector->state); + + amdgpu_dm_connector->min_vfreq = 0; + amdgpu_dm_connector->max_vfreq = 0; + amdgpu_dm_connector->pixel_clock_mhz = 0; + connector->display_info.monitor_range.min_vfreq = 0; + connector->display_info.monitor_range.max_vfreq = 0; + freesync_capable = false; + + goto update; + } + + dm_con_state = to_dm_connector_state(connector->state); + + if (!adev->dm.freesync_module) + goto update; + + if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT + || sink->sink_signal == SIGNAL_TYPE_EDP) { + bool edid_check_required = false; + + if (edid) { + edid_check_required = is_dp_capable_without_timing_msa( + adev->dm.dc, + amdgpu_dm_connector); + } + + if (edid_check_required == true && (edid->version > 1 || + (edid->version == 1 && edid->revision > 1))) { + for (i = 0; i < 4; i++) { + + timing = &edid->detailed_timings[i]; + data = &timing->data.other_data; + range = &data->data.range; + /* + * Check if monitor has continuous frequency mode + */ + if (data->type != EDID_DETAIL_MONITOR_RANGE) + continue; + /* + * Check for flag range limits only. If flag == 1 then + * no additional timing information provided. 
+ * Default GTF, GTF Secondary curve and CVT are not + * supported + */ + if (range->flags != 1) + continue; + + amdgpu_dm_connector->min_vfreq = range->min_vfreq; + amdgpu_dm_connector->max_vfreq = range->max_vfreq; + amdgpu_dm_connector->pixel_clock_mhz = + range->pixel_clock_mhz * 10; + + connector->display_info.monitor_range.min_vfreq = range->min_vfreq; + connector->display_info.monitor_range.max_vfreq = range->max_vfreq; + + break; + } + + if (amdgpu_dm_connector->max_vfreq - + amdgpu_dm_connector->min_vfreq > 10) { + + freesync_capable = true; + } + } + parse_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); + + if (vsdb_info.replay_mode) { + amdgpu_dm_connector->vsdb_info.replay_mode = vsdb_info.replay_mode; + amdgpu_dm_connector->vsdb_info.amd_vsdb_version = vsdb_info.amd_vsdb_version; + amdgpu_dm_connector->as_type = ADAPTIVE_SYNC_TYPE_EDP; + } + + } else if (edid && sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A) { + i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); + if (i >= 0 && vsdb_info.freesync_supported) { + timing = &edid->detailed_timings[i]; + data = &timing->data.other_data; + + amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; + amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) + freesync_capable = true; + + connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; + connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; + } + } + + as_type = dm_get_adaptive_sync_support_type(amdgpu_dm_connector->dc_link); + + if (as_type == FREESYNC_TYPE_PCON_IN_WHITELIST) { + i = parse_hdmi_amd_vsdb(amdgpu_dm_connector, edid, &vsdb_info); + if (i >= 0 && vsdb_info.freesync_supported && vsdb_info.amd_vsdb_version > 0) { + + amdgpu_dm_connector->pack_sdp_v1_3 = true; + amdgpu_dm_connector->as_type = as_type; + amdgpu_dm_connector->vsdb_info = vsdb_info; + + amdgpu_dm_connector->min_vfreq = vsdb_info.min_refresh_rate_hz; + amdgpu_dm_connector->max_vfreq = vsdb_info.max_refresh_rate_hz; + if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) + freesync_capable = true; + + connector->display_info.monitor_range.min_vfreq = vsdb_info.min_refresh_rate_hz; + connector->display_info.monitor_range.max_vfreq = vsdb_info.max_refresh_rate_hz; + } + } + +update: + if (dm_con_state) + dm_con_state->freesync_capable = freesync_capable; + + if (connector->vrr_capable_property) + drm_connector_set_vrr_capable_property(connector, + freesync_capable); +} + +void amdgpu_dm_trigger_timing_sync(struct drm_device *dev) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + struct dc *dc = adev->dm.dc; + int i; + + mutex_lock(&adev->dm.dc_lock); + if (dc->current_state) { + for (i = 0; i < dc->current_state->stream_count; ++i) + dc->current_state->streams[i] + ->triggered_crtc_reset.enabled = + adev->dm.force_timing_sync; + + dm_enable_per_frame_crtc_master_sync(dc->current_state); + dc_trigger_sync(dc, dc->current_state); + } + mutex_unlock(&adev->dm.dc_lock); +} + +void dm_write_reg_func(const struct dc_context *ctx, uint32_t address, + u32 value, const char *func_name) +{ +#ifdef DM_CHECK_ADDR_0 + if (address == 0) { + DC_ERR("invalid register write. 
address = 0"); + return; + } +#endif + cgs_write_register(ctx->cgs_device, address, value); + trace_amdgpu_dc_wreg(&ctx->perf_trace->write_count, address, value); +} + +uint32_t dm_read_reg_func(const struct dc_context *ctx, uint32_t address, + const char *func_name) +{ + u32 value; +#ifdef DM_CHECK_ADDR_0 + if (address == 0) { + DC_ERR("invalid register read; address = 0\n"); + return 0; + } +#endif + + if (ctx->dmub_srv && + ctx->dmub_srv->reg_helper_offload.gather_in_progress && + !ctx->dmub_srv->reg_helper_offload.should_burst_write) { + ASSERT(false); + return 0; + } + + value = cgs_read_register(ctx->cgs_device, address); + + trace_amdgpu_dc_rreg(&ctx->perf_trace->read_count, address, value); + + return value; +} + +int amdgpu_dm_process_dmub_aux_transfer_sync( + struct dc_context *ctx, + unsigned int link_index, + struct aux_payload *payload, + enum aux_return_code_type *operation_result) +{ + struct amdgpu_device *adev = ctx->driver_context; + struct dmub_notification *p_notify = adev->dm.dmub_notify; + int ret = -1; + + mutex_lock(&adev->dm.dpia_aux_lock); + if (!dc_process_dmub_aux_transfer_async(ctx->dc, link_index, payload)) { + *operation_result = AUX_RET_ERROR_ENGINE_ACQUIRE; + goto out; + } + + if (!wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) { + DRM_ERROR("wait_for_completion_timeout timeout!"); + *operation_result = AUX_RET_ERROR_TIMEOUT; + goto out; + } + + if (p_notify->result != AUX_RET_SUCCESS) { + /* + * Transient states before tunneling is enabled could + * lead to this error. We can ignore this for now. + */ + if (p_notify->result != AUX_RET_ERROR_PROTOCOL_ERROR) { + DRM_WARN("DPIA AUX failed on 0x%x(%d), error %d\n", + payload->address, payload->length, + p_notify->result); + } + *operation_result = AUX_RET_ERROR_INVALID_REPLY; + goto out; + } + + + payload->reply[0] = adev->dm.dmub_notify->aux_reply.command; + if (!payload->write && p_notify->aux_reply.length && + (payload->reply[0] == AUX_TRANSACTION_REPLY_AUX_ACK)) { + + if (payload->length != p_notify->aux_reply.length) { + DRM_WARN("invalid read length %d from DPIA AUX 0x%x(%d)!\n", + p_notify->aux_reply.length, + payload->address, payload->length); + *operation_result = AUX_RET_ERROR_INVALID_REPLY; + goto out; + } + + memcpy(payload->data, p_notify->aux_reply.data, + p_notify->aux_reply.length); + } + + /* success */ + ret = p_notify->aux_reply.length; + *operation_result = p_notify->result; +out: + reinit_completion(&adev->dm.dmub_aux_transfer_done); + mutex_unlock(&adev->dm.dpia_aux_lock); + return ret; +} + +int amdgpu_dm_process_dmub_set_config_sync( + struct dc_context *ctx, + unsigned int link_index, + struct set_config_cmd_payload *payload, + enum set_config_status *operation_result) +{ + struct amdgpu_device *adev = ctx->driver_context; + bool is_cmd_complete; + int ret; + + mutex_lock(&adev->dm.dpia_aux_lock); + is_cmd_complete = dc_process_dmub_set_config_async(ctx->dc, + link_index, payload, adev->dm.dmub_notify); + + if (is_cmd_complete || wait_for_completion_timeout(&adev->dm.dmub_aux_transfer_done, 10 * HZ)) { + ret = 0; + *operation_result = adev->dm.dmub_notify->sc_status; + } else { + DRM_ERROR("wait_for_completion_timeout timeout!"); + ret = -1; + *operation_result = SET_CONFIG_UNKNOWN_ERROR; + } + + if (!is_cmd_complete) + reinit_completion(&adev->dm.dmub_aux_transfer_done); + mutex_unlock(&adev->dm.dpia_aux_lock); + return ret; +} + +/* + * Check whether seamless boot is supported. + * + * So far we only support seamless boot on CHIP_VANGOGH. 
+ * If everything goes well, we may consider expanding + * seamless boot to other ASICs. + */ +bool check_seamless_boot_capability(struct amdgpu_device *adev) +{ + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(3, 0, 1): + if (!adev->mman.keep_stolen_vga_memory) + return true; + break; + default: + break; + } + + return false; +} + +bool dm_execute_dmub_cmd(const struct dc_context *ctx, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) +{ + return dc_dmub_srv_cmd_run(ctx->dmub_srv, cmd, wait_type); +} + +bool dm_execute_dmub_cmd_list(const struct dc_context *ctx, unsigned int count, union dmub_rb_cmd *cmd, enum dm_dmub_wait_type wait_type) +{ + return dc_dmub_srv_cmd_run_list(ctx->dmub_srv, count, cmd, wait_type); +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h new file mode 100644 index 0000000000..9e4cc5eeda --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -0,0 +1,844 @@ +/* + * Copyright (C) 2015-2020 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_H__ +#define __AMDGPU_DM_H__ + +#include <drm/display/drm_dp_mst_helper.h> +#include <drm/drm_atomic.h> +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> +#include <drm/drm_plane.h> +#include "link_service_types.h" + +/* + * This file contains the definition for amdgpu_display_manager + * and its API for amdgpu driver's use. + * This component provides all the display related functionality + * and this is the only component that calls DAL API. + * The API contained here is intended for amdgpu driver use.
+ * The API that is called directly from KMS framework is located + * in amdgpu_dm_kms.h file + */ + +#define AMDGPU_DM_MAX_DISPLAY_INDEX 31 + +#define AMDGPU_DM_MAX_CRTC 6 + +#define AMDGPU_DM_MAX_NUM_EDP 2 + +#define AMDGPU_DMUB_NOTIFICATION_MAX 5 + +#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_IEEE_REGISTRATION_ID 0x00001A +#define AMD_VSDB_VERSION_3_FEATURECAP_REPLAYMODE 0x40 +#define HDMI_AMD_VENDOR_SPECIFIC_DATA_BLOCK_VERSION_3 0x3 +/* +#include "include/amdgpu_dal_power_if.h" +#include "amdgpu_dm_irq.h" +*/ + +#include "irq_types.h" +#include "signal_types.h" +#include "amdgpu_dm_crc.h" +#include "mod_info_packet.h" +struct aux_payload; +struct set_config_cmd_payload; +enum aux_return_code_type; +enum set_config_status; + +/* Forward declarations */ +struct amdgpu_device; +struct amdgpu_crtc; +struct drm_device; +struct dc; +struct amdgpu_bo; +struct dmub_srv; +struct dc_plane_state; +struct dmub_notification; + +struct amd_vsdb_block { + unsigned char ieee_id[3]; + unsigned char version; + unsigned char feature_caps; +}; + +struct common_irq_params { + struct amdgpu_device *adev; + enum dc_irq_source irq_src; + atomic64_t previous_timestamp; +}; + +/** + * struct dm_compressor_info - Buffer info used by frame buffer compression + * @cpu_addr: MMIO cpu addr + * @bo_ptr: Pointer to the buffer object + * @gpu_addr: MMIO gpu addr + */ +struct dm_compressor_info { + void *cpu_addr; + struct amdgpu_bo *bo_ptr; + uint64_t gpu_addr; +}; + +typedef void (*dmub_notify_interrupt_callback_t)(struct amdgpu_device *adev, struct dmub_notification *notify); + +/** + * struct dmub_hpd_work - Handle time consuming work in low priority outbox IRQ + * + * @handle_hpd_work: Work to be executed in a separate thread to handle hpd_low_irq + * @dmub_notify: notification for callback function + * @adev: amdgpu_device pointer + */ +struct dmub_hpd_work { + struct work_struct handle_hpd_work; + struct dmub_notification *dmub_notify; + struct amdgpu_device *adev; +}; + +/** + * struct vblank_control_work - Work data for vblank control + * @work: Kernel work data for the work event + * @dm: amdgpu display manager device + * @acrtc: amdgpu CRTC instance for which the event has occurred + * @stream: DC stream for which the event has occurred + * @enable: true if enabling vblank + */ +struct vblank_control_work { + struct work_struct work; + struct amdgpu_display_manager *dm; + struct amdgpu_crtc *acrtc; + struct dc_stream_state *stream; + bool enable; +}; + +/** + * struct amdgpu_dm_backlight_caps - Information about backlight + * + * Describe the backlight support for ACPI or eDP AUX. + */ +struct amdgpu_dm_backlight_caps { + /** + * @ext_caps: Keep the data struct with all the information about the + * display support for HDR. + */ + union dpcd_sink_ext_caps *ext_caps; + /** + * @aux_min_input_signal: Min brightness value supported by the display + */ + u32 aux_min_input_signal; + /** + * @aux_max_input_signal: Max brightness value supported by the display + * in nits. + */ + u32 aux_max_input_signal; + /** + * @min_input_signal: minimum possible input in range 0-255. + */ + int min_input_signal; + /** + * @max_input_signal: maximum possible input in range 0-255. + */ + int max_input_signal; + /** + * @caps_valid: true if these values are from the ACPI interface. + */ + bool caps_valid; + /** + * @aux_support: Describes if the display supports AUX backlight. 
+ */ + bool aux_support; +}; + +/** + * struct dal_allocation - Tracks mapped FB memory for SMU communication + * @list: list of dal allocations + * @bo: GPU buffer object + * @cpu_ptr: CPU virtual address of the GPU buffer object + * @gpu_addr: GPU virtual address of the GPU buffer object + */ +struct dal_allocation { + struct list_head list; + struct amdgpu_bo *bo; + void *cpu_ptr; + u64 gpu_addr; +}; + +/** + * struct hpd_rx_irq_offload_work_queue - Work queue to handle hpd_rx_irq + * offload work + */ +struct hpd_rx_irq_offload_work_queue { + /** + * @wq: workqueue structure to queue offload work. + */ + struct workqueue_struct *wq; + /** + * @offload_lock: To protect fields of offload work queue. + */ + spinlock_t offload_lock; + /** + * @is_handling_link_loss: Used to prevent inserting link loss event when + * we're handling link loss + */ + bool is_handling_link_loss; + /** + * @is_handling_mst_msg_rdy_event: Used to prevent inserting mst message + * ready event when we're already handling mst message ready event + */ + bool is_handling_mst_msg_rdy_event; + /** + * @aconnector: The aconnector that this work queue is attached to + */ + struct amdgpu_dm_connector *aconnector; +}; + +/** + * struct hpd_rx_irq_offload_work - hpd_rx_irq offload work structure + */ +struct hpd_rx_irq_offload_work { + /** + * @work: offload work + */ + struct work_struct work; + /** + * @data: reference irq data which is used while handling offload work + */ + union hpd_irq_data data; + /** + * @offload_wq: offload work queue that this work is queued to + */ + struct hpd_rx_irq_offload_work_queue *offload_wq; +}; + +/** + * struct amdgpu_display_manager - Central amdgpu display manager device + * + * @dc: Display Core control structure + * @adev: AMDGPU base driver structure + * @ddev: DRM base driver structure + * @display_indexes_num: Max number of display streams supported + * @irq_handler_list_table_lock: Synchronizes access to IRQ tables + * @backlight_dev: Backlight control device + * @backlight_link: Link on which to control backlight + * @backlight_caps: Capabilities of the backlight device + * @freesync_module: Module handling freesync calculations + * @hdcp_workqueue: AMDGPU content protection queue + * @fw_dmcu: Reference to DMCU firmware + * @dmcu_fw_version: Version of the DMCU firmware + * @soc_bounding_box: SOC bounding box values provided by gpu_info FW + * @cached_state: Caches device atomic state for suspend/resume + * @cached_dc_state: Cached state of content streams + * @compressor: Frame buffer compression buffer. See &struct dm_compressor_info + * @force_timing_sync: set via debugfs. When set, indicates that all connected + * displays will be forced to synchronize. + * @dmcub_trace_event_en: enable dmcub trace events + * @dmub_outbox_params: DMUB Outbox parameters + * @num_of_edps: number of backlight eDPs + * @disable_hpd_irq: disables all HPD and HPD RX interrupt handling in the + * driver when true + * @dmub_aux_transfer_done: struct completion used to indicate when DMUB + * transfers are done + * @delayed_hpd_wq: work queue used to delay DMUB HPD work + */ +struct amdgpu_display_manager { + + struct dc *dc; + + /** + * @dmub_srv: + * + * DMUB service, used for controlling the DMUB on hardware + * that supports it. The pointer to the dmub_srv will be + * NULL on hardware that does not support it. + */ + struct dmub_srv *dmub_srv; + + /** + * @dmub_notify: + * + * Notification from DMUB. 
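+ * Filled in from the DMUB outbox and consumed by, e.g., the DPIA AUX + * transfer and SET_CONFIG synchronization paths.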
+ */ + + struct dmub_notification *dmub_notify; + + /** + * @dmub_callback: + * + * Callback functions to handle notification from DMUB. + */ + + dmub_notify_interrupt_callback_t dmub_callback[AMDGPU_DMUB_NOTIFICATION_MAX]; + + /** + * @dmub_thread_offload: + * + * Flag to indicate if callback is offload. + */ + + bool dmub_thread_offload[AMDGPU_DMUB_NOTIFICATION_MAX]; + + /** + * @dmub_fb_info: + * + * Framebuffer regions for the DMUB. + */ + struct dmub_srv_fb_info *dmub_fb_info; + + /** + * @dmub_fw: + * + * DMUB firmware, required on hardware that has DMUB support. + */ + const struct firmware *dmub_fw; + + /** + * @dmub_bo: + * + * Buffer object for the DMUB. + */ + struct amdgpu_bo *dmub_bo; + + /** + * @dmub_bo_gpu_addr: + * + * GPU virtual address for the DMUB buffer object. + */ + u64 dmub_bo_gpu_addr; + + /** + * @dmub_bo_cpu_addr: + * + * CPU address for the DMUB buffer object. + */ + void *dmub_bo_cpu_addr; + + /** + * @dmcub_fw_version: + * + * DMCUB firmware version. + */ + uint32_t dmcub_fw_version; + + /** + * @cgs_device: + * + * The Common Graphics Services device. It provides an interface for + * accessing registers. + */ + struct cgs_device *cgs_device; + + struct amdgpu_device *adev; + struct drm_device *ddev; + u16 display_indexes_num; + + /** + * @atomic_obj: + * + * In combination with &dm_atomic_state it helps manage + * global atomic state that doesn't map cleanly into existing + * drm resources, like &dc_context. + */ + struct drm_private_obj atomic_obj; + + /** + * @dc_lock: + * + * Guards access to DC functions that can issue register write + * sequences. + */ + struct mutex dc_lock; + + /** + * @audio_lock: + * + * Guards access to audio instance changes. + */ + struct mutex audio_lock; + + /** + * @audio_component: + * + * Used to notify ELD changes to sound driver. + */ + struct drm_audio_component *audio_component; + + /** + * @audio_registered: + * + * True if the audio component has been registered + * successfully, false otherwise. + */ + bool audio_registered; + + /** + * @irq_handler_list_low_tab: + * + * Low priority IRQ handler table. + * + * It is a n*m table consisting of n IRQ sources, and m handlers per IRQ + * source. Low priority IRQ handlers are deferred to a workqueue to be + * processed. Hence, they can sleep. + * + * Note that handlers are called in the same order as they were + * registered (FIFO). + */ + struct list_head irq_handler_list_low_tab[DAL_IRQ_SOURCES_NUMBER]; + + /** + * @irq_handler_list_high_tab: + * + * High priority IRQ handler table. + * + * It is a n*m table, same as &irq_handler_list_low_tab. However, + * handlers in this table are not deferred and are called immediately. + */ + struct list_head irq_handler_list_high_tab[DAL_IRQ_SOURCES_NUMBER]; + + /** + * @pflip_params: + * + * Page flip IRQ parameters, passed to registered handlers when + * triggered. + */ + struct common_irq_params + pflip_params[DC_IRQ_SOURCE_PFLIP_LAST - DC_IRQ_SOURCE_PFLIP_FIRST + 1]; + + /** + * @vblank_params: + * + * Vertical blanking IRQ parameters, passed to registered handlers when + * triggered. + */ + struct common_irq_params + vblank_params[DC_IRQ_SOURCE_VBLANK6 - DC_IRQ_SOURCE_VBLANK1 + 1]; + + /** + * @vline0_params: + * + * OTG vertical interrupt0 IRQ parameters, passed to registered + * handlers when triggered. + */ + struct common_irq_params + vline0_params[DC_IRQ_SOURCE_DC6_VLINE0 - DC_IRQ_SOURCE_DC1_VLINE0 + 1]; + + /** + * @vupdate_params: + * + * Vertical update IRQ parameters, passed to registered handlers when + * triggered. 
+ */ + struct common_irq_params + vupdate_params[DC_IRQ_SOURCE_VUPDATE6 - DC_IRQ_SOURCE_VUPDATE1 + 1]; + + /** + * @dmub_trace_params: + * + * DMUB trace event IRQ parameters, passed to registered handlers when + * triggered. + */ + struct common_irq_params + dmub_trace_params[1]; + + struct common_irq_params + dmub_outbox_params[1]; + + spinlock_t irq_handler_list_table_lock; + + struct backlight_device *backlight_dev[AMDGPU_DM_MAX_NUM_EDP]; + + const struct dc_link *backlight_link[AMDGPU_DM_MAX_NUM_EDP]; + + uint8_t num_of_edps; + + struct amdgpu_dm_backlight_caps backlight_caps[AMDGPU_DM_MAX_NUM_EDP]; + + struct mod_freesync *freesync_module; + struct hdcp_workqueue *hdcp_workqueue; + + /** + * @vblank_control_workqueue: + * + * Deferred work for vblank control events. + */ + struct workqueue_struct *vblank_control_workqueue; + + struct drm_atomic_state *cached_state; + struct dc_state *cached_dc_state; + + struct dm_compressor_info compressor; + + const struct firmware *fw_dmcu; + uint32_t dmcu_fw_version; + /** + * @soc_bounding_box: + * + * gpu_info FW provided soc bounding box struct or 0 if not + * available in FW + */ + const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box; + + /** + * @active_vblank_irq_count: + * + * number of currently active vblank irqs + */ + uint32_t active_vblank_irq_count; + +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) + /** + * @secure_display_ctxs: + * + * Store the ROI information and the work_struct to command dmub and psp for + * all crtcs. + */ + struct secure_display_context *secure_display_ctxs; +#endif + /** + * @hpd_rx_offload_wq: + * + * Work queue to offload works of hpd_rx_irq + */ + struct hpd_rx_irq_offload_work_queue *hpd_rx_offload_wq; + /** + * @mst_encoders: + * + * fake encoders used for DP MST. + */ + struct amdgpu_encoder mst_encoders[AMDGPU_DM_MAX_CRTC]; + bool force_timing_sync; + bool disable_hpd_irq; + bool dmcub_trace_event_en; + /** + * @da_list: + * + * DAL fb memory allocation list, for communication with SMU. + */ + struct list_head da_list; + struct completion dmub_aux_transfer_done; + struct workqueue_struct *delayed_hpd_wq; + + /** + * @brightness: + * + * cached backlight values. + */ + u32 brightness[AMDGPU_DM_MAX_NUM_EDP]; + /** + * @actual_brightness: + * + * last successfully applied backlight values. + */ + u32 actual_brightness[AMDGPU_DM_MAX_NUM_EDP]; + + /** + * @aux_hpd_discon_quirk: + * + * quirk for hpd discon while aux is on-going. + * occurred on certain intel platform + */ + bool aux_hpd_discon_quirk; + + /** + * @dpia_aux_lock: + * + * Guards access to DPIA AUX + */ + struct mutex dpia_aux_lock; +}; + +enum dsc_clock_force_state { + DSC_CLK_FORCE_DEFAULT = 0, + DSC_CLK_FORCE_ENABLE, + DSC_CLK_FORCE_DISABLE, +}; + +struct dsc_preferred_settings { + enum dsc_clock_force_state dsc_force_enable; + uint32_t dsc_num_slices_v; + uint32_t dsc_num_slices_h; + uint32_t dsc_bits_per_pixel; + bool dsc_force_disable_passthrough; +}; + +enum mst_progress_status { + MST_STATUS_DEFAULT = 0, + MST_PROBE = BIT(0), + MST_REMOTE_EDID = BIT(1), + MST_ALLOCATE_NEW_PAYLOAD = BIT(2), + MST_CLEAR_ALLOCATED_PAYLOAD = BIT(3), +}; + +/** + * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info + * + * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this + * struct is useful to keep track of the display-specific information about + * FreeSync. 
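+ * It is filled from the CEA extension block by parse_edid_cea() and + * consumed by amdgpu_dm_update_freesync_caps().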
+/**
+ * struct amdgpu_hdmi_vsdb_info - Keep track of the VSDB info
+ *
+ * AMDGPU supports FreeSync over HDMI by using the VSDB section, and this
+ * struct is useful to keep track of the display-specific information about
+ * FreeSync.
+ */
+struct amdgpu_hdmi_vsdb_info {
+ /**
+ * @amd_vsdb_version: Vendor Specific Data Block Version, should be
+ * used to determine which Vendor Specific InfoFrame (VSIF) to send.
+ */
+ unsigned int amd_vsdb_version;
+
+ /**
+ * @freesync_supported: FreeSync Supported.
+ */
+ bool freesync_supported;
+
+ /**
+ * @min_refresh_rate_hz: FreeSync Minimum Refresh Rate in Hz.
+ */
+ unsigned int min_refresh_rate_hz;
+
+ /**
+ * @max_refresh_rate_hz: FreeSync Maximum Refresh Rate in Hz.
+ */
+ unsigned int max_refresh_rate_hz;
+
+ /**
+ * @replay_mode: Replay is supported.
+ */
+ bool replay_mode;
+};
+
+struct amdgpu_dm_connector {
+
+ struct drm_connector base;
+ uint32_t connector_id;
+ int bl_idx;
+
+ /* We need to keep the EDID cached between detect and get_modes,
+ * due to analog/digital/tvencoder handling. */
+ struct edid *edid;
+
+ /* shared with amdgpu */
+ struct amdgpu_hpd hpd;
+
+ /* number of modes generated from EDID at 'dc_sink' */
+ int num_modes;
+
+ /* The 'old' sink - before an HPD.
+ * The 'current' sink is in dc_link->sink. */
+ struct dc_sink *dc_sink;
+ struct dc_link *dc_link;
+
+ /**
+ * @dc_em_sink: Reference to the emulated (virtual) sink.
+ */
+ struct dc_sink *dc_em_sink;
+
+ /* DM only */
+ struct drm_dp_mst_topology_mgr mst_mgr;
+ struct amdgpu_dm_dp_aux dm_dp_aux;
+ struct drm_dp_mst_port *mst_output_port;
+ struct amdgpu_dm_connector *mst_root;
+ struct drm_dp_aux *dsc_aux;
+ struct mutex handle_mst_msg_ready;
+
+ /* TODO see if we can merge with ddc_bus or make a dm_connector */
+ struct amdgpu_i2c_adapter *i2c;
+
+ /* Monitor range limits */
+ /**
+ * @min_vfreq: Minimum frequency supported by the display in Hz. This
+ * value is set to zero when there is no FreeSync support.
+ */
+ int min_vfreq;
+
+ /**
+ * @max_vfreq: Maximum frequency supported by the display in Hz. This
+ * value is set to zero when there is no FreeSync support.
+ */
+ int max_vfreq;
+ int pixel_clock_mhz;
+
+ /* Audio instance - protected by audio_lock. */
+ int audio_inst;
+
+ struct mutex hpd_lock;
+
+ bool fake_enable;
+ bool force_yuv420_output;
+ struct dsc_preferred_settings dsc_settings;
+ union dp_downstream_port_present mst_downstream_port_present;
+ /* Cached display modes */
+ struct drm_display_mode freesync_vid_base;
+
+ int psr_skip_count;
+
+ /* Record progress status of MST */
+ uint8_t mst_status;
+
+ /* Automated testing */
+ bool timing_changed;
+ struct dc_crtc_timing *timing_requested;
+
+ /* Adaptive Sync */
+ bool pack_sdp_v1_3;
+ enum adaptive_sync_type as_type;
+ struct amdgpu_hdmi_vsdb_info vsdb_info;
+};
+
+static inline void amdgpu_dm_set_mst_status(uint8_t *status,
+ uint8_t flags, bool set)
+{
+ if (set)
+ *status |= flags;
+ else
+ *status &= ~flags;
+}
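Since mst_status is a plain bitmask, the helper above can set or clear several MST milestones in one call. A runnable sketch of that composition (the helper body is copied verbatim; the flag values mirror enum mst_progress_status, and main() is illustration only):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Same values as enum mst_progress_status above. */
#define MST_PROBE	BIT(0)
#define MST_REMOTE_EDID	BIT(1)

/* Verbatim copy of amdgpu_dm_set_mst_status() for a standalone demo. */
static inline void amdgpu_dm_set_mst_status(uint8_t *status,
		uint8_t flags, bool set)
{
	if (set)
		*status |= flags;
	else
		*status &= ~flags;
}

int main(void)
{
	uint8_t status = 0;

	/* Mark probe and remote EDID read as done in a single call... */
	amdgpu_dm_set_mst_status(&status, MST_PROBE | MST_REMOTE_EDID, true);
	/* ...then clear just the EDID flag again. */
	amdgpu_dm_set_mst_status(&status, MST_REMOTE_EDID, false);

	printf("status = 0x%02x\n", status); /* prints 0x01 (MST_PROBE) */
	return 0;
}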
+
+#define to_amdgpu_dm_connector(x) container_of(x, struct amdgpu_dm_connector, base)
+
+extern const struct amdgpu_ip_block_version dm_ip_block;
+
+struct dm_plane_state {
+ struct drm_plane_state base;
+ struct dc_plane_state *dc_state;
+};
+
+struct dm_crtc_state {
+ struct drm_crtc_state base;
+ struct dc_stream_state *stream;
+
+ bool cm_has_degamma;
+ bool cm_is_degamma_srgb;
+
+ bool mpo_requested;
+
+ int update_type;
+ int active_planes;
+
+ int crc_skip_count;
+
+ bool freesync_vrr_info_changed;
+
+ bool dsc_force_changed;
+ bool vrr_supported;
+ struct mod_freesync_config freesync_config;
+ struct dc_info_packet vrr_infopacket;
+
+ int abm_level;
+};
+
+#define to_dm_crtc_state(x) container_of(x, struct dm_crtc_state, base)
+
+struct dm_atomic_state {
+ struct drm_private_state base;
+
+ struct dc_state *context;
+};
+
+#define to_dm_atomic_state(x) container_of(x, struct dm_atomic_state, base)
+
+struct dm_connector_state {
+ struct drm_connector_state base;
+
+ enum amdgpu_rmx_type scaling;
+ uint8_t underscan_vborder;
+ uint8_t underscan_hborder;
+ bool underscan_enable;
+ bool freesync_capable;
+ bool update_hdcp;
+ uint8_t abm_level;
+ int vcpi_slots;
+ uint64_t pbn;
+};
+
+#define to_dm_connector_state(x)\
+ container_of((x), struct dm_connector_state, base)
+
+void amdgpu_dm_connector_funcs_reset(struct drm_connector *connector);
+struct drm_connector_state *
+amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector);
+int amdgpu_dm_connector_atomic_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t val);
+
+int amdgpu_dm_connector_atomic_get_property(struct drm_connector *connector,
+ const struct drm_connector_state *state,
+ struct drm_property *property,
+ uint64_t *val);
+
+int amdgpu_dm_get_encoder_crtc_mask(struct amdgpu_device *adev);
+
+void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
+ struct amdgpu_dm_connector *aconnector,
+ int connector_type,
+ struct dc_link *link,
+ int link_index);
+
+enum drm_mode_status amdgpu_dm_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+
+void dm_restore_drm_connector_state(struct drm_device *dev,
+ struct drm_connector *connector);
+
+void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
+ struct edid *edid);
+
+void amdgpu_dm_trigger_timing_sync(struct drm_device *dev);
+
+#define MAX_COLOR_LUT_ENTRIES 4096
+/* Legacy gamma LUT users such as X don't like large LUT sizes */
+#define MAX_COLOR_LEGACY_LUT_ENTRIES 256
+
+void amdgpu_dm_init_color_mod(void);
+int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state);
+int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
+int
amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc, + struct dc_plane_state *dc_plane_state); + +void amdgpu_dm_update_connector_after_detect( + struct amdgpu_dm_connector *aconnector); + +extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs; + +int amdgpu_dm_process_dmub_aux_transfer_sync(struct dc_context *ctx, unsigned int link_index, + struct aux_payload *payload, enum aux_return_code_type *operation_result); + +int amdgpu_dm_process_dmub_set_config_sync(struct dc_context *ctx, unsigned int link_index, + struct set_config_cmd_payload *payload, enum set_config_status *operation_result); + +bool check_seamless_boot_capability(struct amdgpu_device *adev); + +struct dc_stream_state * + create_validate_stream_for_sink(struct amdgpu_dm_connector *aconnector, + const struct drm_display_mode *drm_mode, + const struct dm_connector_state *dm_state, + const struct dc_stream_state *old_stream); + +int dm_atomic_get_state(struct drm_atomic_state *state, + struct dm_atomic_state **dm_state); + +struct amdgpu_dm_connector * +amdgpu_dm_find_first_crtc_matching_connector(struct drm_atomic_state *state, + struct drm_crtc *crtc); + +int convert_dc_color_depth_into_bpc(enum dc_color_depth display_color_depth); +#endif /* __AMDGPU_DM_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c new file mode 100644 index 0000000000..a4cb23d059 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c @@ -0,0 +1,590 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "amdgpu.h" +#include "amdgpu_mode.h" +#include "amdgpu_dm.h" +#include "dc.h" +#include "modules/color/color_gamma.h" +#include "basics/conversion.h" + +/** + * DOC: overview + * + * The DC interface to HW gives us the following color management blocks + * per pipe (surface): + * + * - Input gamma LUT (de-normalized) + * - Input CSC (normalized) + * - Surface degamma LUT (normalized) + * - Surface CSC (normalized) + * - Surface regamma LUT (normalized) + * - Output CSC (normalized) + * + * But these aren't a direct mapping to DRM color properties. 
The current DRM
+ * interface exposes CRTC degamma, CRTC CTM and CRTC regamma while our hardware
+ * is essentially giving:
+ *
+ * Plane CTM -> Plane degamma -> Plane CTM -> Plane regamma -> Plane CTM
+ *
+ * The input gamma LUT block isn't really applicable here since it operates
+ * on the actual input data itself rather than the HW fp representation. The
+ * input and output CSC blocks are technically available to use as part of
+ * the DC interface but are typically used internally by DC for conversions
+ * between color spaces. These could be blended together with user
+ * adjustments in the future but for now these should remain untouched.
+ *
+ * The pipe blending also happens after these blocks so we don't actually
+ * support any CRTC props with correct blending with multiple planes - but we
+ * can still support CRTC color management properties in DM in most single
+ * plane cases correctly with clever management of the DC interface in DM.
+ *
+ * As per DRM documentation, blocks should be in hardware bypass when their
+ * respective property is set to NULL. A linear DGM/RGM LUT should also be
+ * considered as putting the respective block into bypass mode.
+ *
+ * This means that the following configuration is assumed to be the default:
+ *
+ * Plane DGM Bypass -> Plane CTM Bypass -> Plane RGM Bypass -> ...
+ * CRTC DGM Bypass -> CRTC CTM Bypass -> CRTC RGM Bypass
+ */
+
+#define MAX_DRM_LUT_VALUE 0xFFFF
+
+/**
+ * amdgpu_dm_init_color_mod - Initialize the color module.
+ *
+ * We're not using the full color module, only certain components.
+ * Only call setup functions for components that we need.
+ */
+void amdgpu_dm_init_color_mod(void)
+{
+ setup_x_points_distribution();
+}
+
+/**
+ * __extract_blob_lut - Extracts the DRM lut and lut size from a blob.
+ * @blob: DRM color mgmt property blob
+ * @size: lut size
+ *
+ * Returns:
+ * DRM LUT or NULL
+ */
+static const struct drm_color_lut *
+__extract_blob_lut(const struct drm_property_blob *blob, uint32_t *size)
+{
+ *size = blob ? drm_color_lut_size(blob) : 0;
+ return blob ? (struct drm_color_lut *)blob->data : NULL;
+}
+
+/**
+ * __is_lut_linear - check if the given lut is a linear mapping of values
+ * @lut: given lut to check values
+ * @size: lut size
+ *
+ * It is considered linear if the lut represents:
+ * f(a) = (MAX_DRM_LUT_VALUE / (size - 1)) * a; for integer a in [0, size)
+ *
+ * Returns:
+ * True if the given lut is a linear mapping of values, i.e. it acts like a
+ * bypass LUT. Otherwise, false.
+ */
+static bool __is_lut_linear(const struct drm_color_lut *lut, uint32_t size)
+{
+ int i;
+ uint32_t expected;
+ int delta;
+
+ for (i = 0; i < size; i++) {
+ /* All color values should equal */
+ if ((lut[i].red != lut[i].green) || (lut[i].green != lut[i].blue))
+ return false;
+
+ expected = i * MAX_DRM_LUT_VALUE / (size-1);
+
+ /* Allow a +/-1 error. */
+ delta = lut[i].red - expected;
+ if (delta < -1 || 1 < delta)
+ return false;
+ }
+ return true;
+}
+
+/**
+ * __drm_lut_to_dc_gamma - convert the drm_color_lut to dc_gamma.
+ * @lut: DRM lookup table for color conversion
+ * @gamma: DC gamma to set entries
+ * @is_legacy: legacy or atomic gamma
+ *
+ * The conversion depends on the size of the lut - whether or not it's legacy.
+ */
+static void __drm_lut_to_dc_gamma(const struct drm_color_lut *lut,
+ struct dc_gamma *gamma, bool is_legacy)
+{
+ uint32_t r, g, b;
+ int i;
+
+ if (is_legacy) {
+ for (i = 0; i < MAX_COLOR_LEGACY_LUT_ENTRIES; i++) {
+ r = drm_color_lut_extract(lut[i].red, 16);
+ g = drm_color_lut_extract(lut[i].green, 16);
+ b = drm_color_lut_extract(lut[i].blue, 16);
+
+ gamma->entries.red[i] = dc_fixpt_from_int(r);
+ gamma->entries.green[i] = dc_fixpt_from_int(g);
+ gamma->entries.blue[i] = dc_fixpt_from_int(b);
+ }
+ return;
+ }
+
+ /* else */
+ for (i = 0; i < MAX_COLOR_LUT_ENTRIES; i++) {
+ r = drm_color_lut_extract(lut[i].red, 16);
+ g = drm_color_lut_extract(lut[i].green, 16);
+ b = drm_color_lut_extract(lut[i].blue, 16);
+
+ gamma->entries.red[i] = dc_fixpt_from_fraction(r, MAX_DRM_LUT_VALUE);
+ gamma->entries.green[i] = dc_fixpt_from_fraction(g, MAX_DRM_LUT_VALUE);
+ gamma->entries.blue[i] = dc_fixpt_from_fraction(b, MAX_DRM_LUT_VALUE);
+ }
+}
+
+/**
+ * __drm_ctm_to_dc_matrix - converts a DRM CTM to a DC CSC float matrix
+ * @ctm: DRM color transformation matrix
+ * @matrix: DC CSC float matrix
+ *
+ * The matrix needs to be a 3x4 (12 entry) matrix.
+ */
+static void __drm_ctm_to_dc_matrix(const struct drm_color_ctm *ctm,
+ struct fixed31_32 *matrix)
+{
+ int64_t val;
+ int i;
+
+ /*
+ * DRM gives a 3x3 matrix, but DC wants 3x4. Assuming we're operating
+ * with homogeneous coordinates, augment the matrix with 0's.
+ *
+ * The format provided is S31.32, using signed-magnitude representation.
+ * Our fixed31_32 is also S31.32, but is using 2's complement. We have
+ * to convert from signed-magnitude to 2's complement.
+ */
+ for (i = 0; i < 12; i++) {
+ /* Skip 4th element */
+ if (i % 4 == 3) {
+ matrix[i] = dc_fixpt_zero;
+ continue;
+ }
+
+ /* gamut_remap_matrix[i] = ctm[i - floor(i/4)] */
+ val = ctm->matrix[i - (i / 4)];
+ /* If negative, convert to 2's complement. */
+ if (val & (1ULL << 63))
+ val = -(val & ~(1ULL << 63));
+
+ matrix[i].value = val;
+ }
+}
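An aside on the fixed-point juggling above: DRM's CTM entries are S31.32 sign-magnitude (bit 63 is the sign, the low 32 bits are the fraction), while DC's fixed31_32 is two's complement. A standalone sketch of the same conversion, worked on a couple of values (illustrative only; the helper mirrors the loop body of __drm_ctm_to_dc_matrix):

#include <stdint.h>
#include <stdio.h>

/* Convert one DRM CTM entry (S31.32 sign-magnitude) to two's complement. */
static int64_t ctm_to_twos_complement(uint64_t drm_val)
{
	int64_t val = (int64_t)drm_val;

	if (drm_val & (1ULL << 63))	/* negative in sign-magnitude */
		val = -(int64_t)(drm_val & ~(1ULL << 63));

	return val;
}

int main(void)
{
	uint64_t one = 1ULL << 32;	/* +1.0 in S31.32 */
	uint64_t minus_half = (1ULL << 63) | (1ULL << 31);	/* -0.5 */

	printf("+1.0 -> %lld\n", (long long)ctm_to_twos_complement(one));
	printf("-0.5 -> %lld (i.e. %f)\n",
	       (long long)ctm_to_twos_complement(minus_half),
	       (double)ctm_to_twos_complement(minus_half) / 4294967296.0);
	return 0;
}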
+/**
+ * __set_legacy_tf - Calculates the legacy transfer function
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut
+ * @has_rom: if ROM can be used for hardcoded curve
+ *
+ * Only for sRGB input space
+ *
+ * Returns:
+ * 0 on success, -ENOMEM on failure
+ */
+static int __set_legacy_tf(struct dc_transfer_func *func,
+ const struct drm_color_lut *lut, uint32_t lut_size,
+ bool has_rom)
+{
+ struct dc_gamma *gamma = NULL;
+ struct calculate_buffer cal_buffer = {0};
+ bool res;
+
+ ASSERT(lut && lut_size == MAX_COLOR_LEGACY_LUT_ENTRIES);
+
+ cal_buffer.buffer_index = -1;
+
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ gamma->type = GAMMA_RGB_256;
+ gamma->num_entries = lut_size;
+ __drm_lut_to_dc_gamma(lut, gamma, true);
+
+ res = mod_color_calculate_regamma_params(func, gamma, true, has_rom,
+ NULL, &cal_buffer);
+
+ dc_gamma_release(&gamma);
+
+ return res ? 0 : -ENOMEM;
+}
+
+/**
+ * __set_output_tf - calculates the output transfer function based on expected input space.
+ * @func: transfer function
+ * @lut: lookup table that defines the color space
+ * @lut_size: size of respective lut
+ * @has_rom: if ROM can be used for hardcoded curve
+ *
+ * Returns:
+ * 0 on success, -ENOMEM on failure.
+ */ +static int __set_output_tf(struct dc_transfer_func *func, + const struct drm_color_lut *lut, uint32_t lut_size, + bool has_rom) +{ + struct dc_gamma *gamma = NULL; + struct calculate_buffer cal_buffer = {0}; + bool res; + + ASSERT(lut && lut_size == MAX_COLOR_LUT_ENTRIES); + + cal_buffer.buffer_index = -1; + + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; + + gamma->num_entries = lut_size; + __drm_lut_to_dc_gamma(lut, gamma, false); + + if (func->tf == TRANSFER_FUNCTION_LINEAR) { + /* + * Color module doesn't like calculating regamma params + * on top of a linear input. But degamma params can be used + * instead to simulate this. + */ + gamma->type = GAMMA_CUSTOM; + res = mod_color_calculate_degamma_params(NULL, func, + gamma, true); + } else { + /* + * Assume sRGB. The actual mapping will depend on whether the + * input was legacy or not. + */ + gamma->type = GAMMA_CS_TFM_1D; + res = mod_color_calculate_regamma_params(func, gamma, false, + has_rom, NULL, &cal_buffer); + } + + dc_gamma_release(&gamma); + + return res ? 0 : -ENOMEM; +} + +/** + * __set_input_tf - calculates the input transfer function based on expected + * input space. + * @func: transfer function + * @lut: lookup table that defines the color space + * @lut_size: size of respective lut. + * + * Returns: + * 0 in case of success. -ENOMEM if fails. + */ +static int __set_input_tf(struct dc_transfer_func *func, + const struct drm_color_lut *lut, uint32_t lut_size) +{ + struct dc_gamma *gamma = NULL; + bool res; + + gamma = dc_create_gamma(); + if (!gamma) + return -ENOMEM; + + gamma->type = GAMMA_CUSTOM; + gamma->num_entries = lut_size; + + __drm_lut_to_dc_gamma(lut, gamma, false); + + res = mod_color_calculate_degamma_params(NULL, func, gamma, true); + dc_gamma_release(&gamma); + + return res ? 0 : -ENOMEM; +} + +/** + * amdgpu_dm_verify_lut_sizes - verifies if DRM luts match the hw supported sizes + * @crtc_state: the DRM CRTC state + * + * Verifies that the Degamma and Gamma LUTs attached to the &crtc_state + * are of the expected size. + * + * Returns: + * 0 on success. -EINVAL if any lut sizes are invalid. + */ +int amdgpu_dm_verify_lut_sizes(const struct drm_crtc_state *crtc_state) +{ + const struct drm_color_lut *lut = NULL; + uint32_t size = 0; + + lut = __extract_blob_lut(crtc_state->degamma_lut, &size); + if (lut && size != MAX_COLOR_LUT_ENTRIES) { + DRM_DEBUG_DRIVER( + "Invalid Degamma LUT size. Should be %u but got %u.\n", + MAX_COLOR_LUT_ENTRIES, size); + return -EINVAL; + } + + lut = __extract_blob_lut(crtc_state->gamma_lut, &size); + if (lut && size != MAX_COLOR_LUT_ENTRIES && + size != MAX_COLOR_LEGACY_LUT_ENTRIES) { + DRM_DEBUG_DRIVER( + "Invalid Gamma LUT size. Should be %u (or %u for legacy) but got %u.\n", + MAX_COLOR_LUT_ENTRIES, MAX_COLOR_LEGACY_LUT_ENTRIES, + size); + return -EINVAL; + } + + return 0; +} + +/** + * amdgpu_dm_update_crtc_color_mgmt: Maps DRM color management to DC stream. + * @crtc: amdgpu_dm crtc state + * + * With no plane level color management properties we're free to use any + * of the HW blocks as long as the CRTC CTM always comes before the + * CRTC RGM and after the CRTC DGM. + * + * - The CRTC RGM block will be placed in the RGM LUT block if it is non-linear. + * - The CRTC DGM block will be placed in the DGM LUT block if it is non-linear. + * - The CRTC CTM will be placed in the gamut remap block if it is non-linear. + * + * The RGM block is typically more fully featured and accurate across + * all ASICs - DCE can't support a custom non-linear CRTC DGM. 
+ *
+ * For supporting both plane level color management and CRTC level color
+ * management at once we have to either restrict the usage of CRTC properties
+ * or blend adjustments together.
+ *
+ * Returns:
+ * 0 on success. Error code if setup fails.
+ */
+int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc)
+{
+ struct dc_stream_state *stream = crtc->stream;
+ struct amdgpu_device *adev = drm_to_adev(crtc->base.state->dev);
+ bool has_rom = adev->asic_type <= CHIP_RAVEN;
+ struct drm_color_ctm *ctm = NULL;
+ const struct drm_color_lut *degamma_lut, *regamma_lut;
+ uint32_t degamma_size, regamma_size;
+ bool has_regamma, has_degamma;
+ bool is_legacy;
+ int r;
+
+ r = amdgpu_dm_verify_lut_sizes(&crtc->base);
+ if (r)
+ return r;
+
+ degamma_lut = __extract_blob_lut(crtc->base.degamma_lut, &degamma_size);
+ regamma_lut = __extract_blob_lut(crtc->base.gamma_lut, &regamma_size);
+
+ has_degamma =
+ degamma_lut && !__is_lut_linear(degamma_lut, degamma_size);
+
+ has_regamma =
+ regamma_lut && !__is_lut_linear(regamma_lut, regamma_size);
+
+ is_legacy = regamma_size == MAX_COLOR_LEGACY_LUT_ENTRIES;
+
+ /* Reset all adjustments. */
+ crtc->cm_has_degamma = false;
+ crtc->cm_is_degamma_srgb = false;
+
+ /* Setup regamma and degamma. */
+ if (is_legacy) {
+ /*
+ * Legacy regamma forces us to use the sRGB RGM as a base.
+ * This also means we can't use linear DGM since DGM needs
+ * to use sRGB as a base as well, resulting in incorrect CRTC
+ * DGM and CRTC CTM.
+ *
+ * TODO: Just map this to the standard regamma interface
+ * instead since this isn't really right. One of the cases
+ * where this setup currently fails is trying to do an
+ * inverse color ramp in legacy userspace.
+ */
+ crtc->cm_is_degamma_srgb = true;
+ stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
+ stream->out_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
+
+ r = __set_legacy_tf(stream->out_transfer_func, regamma_lut,
+ regamma_size, has_rom);
+ if (r)
+ return r;
+ } else if (has_regamma) {
+ /* If atomic regamma, CRTC RGM goes into RGM LUT. */
+ stream->out_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
+ stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
+
+ r = __set_output_tf(stream->out_transfer_func, regamma_lut,
+ regamma_size, has_rom);
+ if (r)
+ return r;
+ } else {
+ /*
+ * No CRTC RGM means we can just put the block into bypass
+ * since we don't have any plane level adjustments using it.
+ */
+ stream->out_transfer_func->type = TF_TYPE_BYPASS;
+ stream->out_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
+ }
+
+ /*
+ * CRTC DGM goes into DGM LUT. It would be nice to place it
+ * into the RGM since it's a more featured block but we'd
+ * have to place the CTM in the OCSC in that case.
+ */
+ crtc->cm_has_degamma = has_degamma;
+
+ /* Setup CRTC CTM. */
+ if (crtc->base.ctm) {
+ ctm = (struct drm_color_ctm *)crtc->base.ctm->data;
+
+ /*
+ * Gamut remapping must be used for gamma correction
+ * since it comes before the regamma correction.
+ *
+ * OCSC could be used for gamma correction, but we'd need to
+ * blend the adjustments together with the required output
+ * conversion matrix - so just use the gamut remap block
+ * for now.
+ */
+ __drm_ctm_to_dc_matrix(ctm, stream->gamut_remap_matrix.matrix);
+
+ stream->gamut_remap_matrix.enable_remap = true;
+ stream->csc_color_matrix.enable_adjustment = false;
+ } else {
+ /* Bypass CTM. */
+ stream->gamut_remap_matrix.enable_remap = false;
+ stream->csc_color_matrix.enable_adjustment = false;
+ }
+
+ return 0;
+}
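To make the legacy branch above concrete: pre-atomic userspace such as X programs a 256-entry ramp through the old gamma ioctl, which is exactly what lands in the MAX_COLOR_LEGACY_LUT_ENTRIES path. A hedged userspace sketch using libdrm (error handling trimmed; the CRTC id is a placeholder to be taken from drmModeGetResources()):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

#define LEGACY_LUT_SIZE 256	/* matches MAX_COLOR_LEGACY_LUT_ENTRIES */

int main(void)
{
	uint16_t r[LEGACY_LUT_SIZE], g[LEGACY_LUT_SIZE], b[LEGACY_LUT_SIZE];
	uint32_t crtc_id = 42;	/* placeholder CRTC object id */
	int fd = open("/dev/dri/card0", O_RDWR);
	int i;

	if (fd < 0)
		return 1;

	/* Identity ramp over the 16-bit range; a non-trivial curve here is
	 * what __set_legacy_tf() ends up folding into the RGM LUT. */
	for (i = 0; i < LEGACY_LUT_SIZE; i++)
		r[i] = g[i] = b[i] = (uint16_t)(i * 65535 / (LEGACY_LUT_SIZE - 1));

	if (drmModeCrtcSetGamma(fd, crtc_id, LEGACY_LUT_SIZE, r, g, b))
		perror("drmModeCrtcSetGamma");

	return 0;
}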
+/**
+ * amdgpu_dm_update_plane_color_mgmt: Maps DRM color management to DC plane.
+ * @crtc: amdgpu_dm crtc state
+ * @dc_plane_state: target DC surface
+ *
+ * Update the underlying dc_plane_state's input transfer function (ITF) in
+ * preparation for hardware commit. The transfer function used depends on
+ * the preparation done on the stream for color management.
+ *
+ * Returns:
+ * 0 on success. -ENOMEM if mem allocation fails.
+ */
+int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
+ struct dc_plane_state *dc_plane_state)
+{
+ const struct drm_color_lut *degamma_lut;
+ enum dc_transfer_func_predefined tf = TRANSFER_FUNCTION_SRGB;
+ uint32_t degamma_size;
+ int r;
+
+ /* Get the correct base transfer function for implicit degamma. */
+ switch (dc_plane_state->format) {
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr:
+ case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb:
+ /* DC doesn't have a transfer function for BT601 specifically. */
+ tf = TRANSFER_FUNCTION_BT709;
+ break;
+ default:
+ break;
+ }
+
+ if (crtc->cm_has_degamma) {
+ degamma_lut = __extract_blob_lut(crtc->base.degamma_lut,
+ &degamma_size);
+ ASSERT(degamma_size == MAX_COLOR_LUT_ENTRIES);
+
+ dc_plane_state->in_transfer_func->type =
+ TF_TYPE_DISTRIBUTED_POINTS;
+
+ /*
+ * This case isn't fully correct, but also fairly
+ * uncommon. This is userspace trying to use a
+ * legacy gamma LUT + atomic degamma LUT
+ * at the same time.
+ *
+ * Legacy gamma requires the input to be in linear
+ * space, so that means we need to apply an sRGB
+ * degamma. But color module also doesn't support
+ * a user ramp in this case so the degamma will
+ * be lost.
+ *
+ * Even if we did support it, it's still not right:
+ *
+ * Input -> CRTC DGM -> sRGB DGM -> CRTC CTM ->
+ * sRGB RGM -> CRTC RGM -> Output
+ *
+ * The CSC will be done in the wrong space since
+ * we're applying an sRGB DGM on top of the CRTC
+ * DGM.
+ *
+ * TODO: Don't use the legacy gamma interface and just
+ * map these to the atomic one instead.
+ */
+ if (crtc->cm_is_degamma_srgb)
+ dc_plane_state->in_transfer_func->tf = tf;
+ else
+ dc_plane_state->in_transfer_func->tf =
+ TRANSFER_FUNCTION_LINEAR;
+
+ r = __set_input_tf(dc_plane_state->in_transfer_func,
+ degamma_lut, degamma_size);
+ if (r)
+ return r;
+ } else if (crtc->cm_is_degamma_srgb) {
+ /*
+ * For legacy gamma support we need the regamma input
+ * in linear space. Assume that the input is sRGB.
+ */
+ dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
+ dc_plane_state->in_transfer_func->tf = tf;
+
+ if (tf != TRANSFER_FUNCTION_SRGB &&
+ !mod_color_calculate_degamma_params(NULL,
+ dc_plane_state->in_transfer_func, NULL, false))
+ return -ENOMEM;
+ } else {
+ /* ...Otherwise we can just bypass the DGM block. */
+ dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
+ dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_LINEAR;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
new file mode 100644
index 0000000000..52ecfa746b
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -0,0 +1,555 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <drm/drm_crtc.h> +#include <drm/drm_vblank.h> + +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "dc.h" +#include "amdgpu_securedisplay.h" + +static const char *const pipe_crc_sources[] = { + "none", + "crtc", + "crtc dither", + "dprx", + "dprx dither", + "auto", +}; + +static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source) +{ + if (!source || !strcmp(source, "none")) + return AMDGPU_DM_PIPE_CRC_SOURCE_NONE; + if (!strcmp(source, "auto") || !strcmp(source, "crtc")) + return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC; + if (!strcmp(source, "dprx")) + return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX; + if (!strcmp(source, "crtc dither")) + return AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER; + if (!strcmp(source, "dprx dither")) + return AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER; + + return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID; +} + +static bool dm_is_crc_source_crtc(enum amdgpu_dm_pipe_crc_source src) +{ + return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC) || + (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER); +} + +static bool dm_is_crc_source_dprx(enum amdgpu_dm_pipe_crc_source src) +{ + return (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX) || + (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER); +} + +static bool dm_need_crc_dither(enum amdgpu_dm_pipe_crc_source src) +{ + return (src == AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER) || + (src == AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER) || + (src == AMDGPU_DM_PIPE_CRC_SOURCE_NONE); +} + +const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc, + size_t *count) +{ + *count = ARRAY_SIZE(pipe_crc_sources); + return pipe_crc_sources; +} + +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +static void amdgpu_dm_set_crc_window_default(struct drm_crtc *crtc, struct dc_stream_state *stream) +{ + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_display_manager *dm = &drm_to_adev(drm_dev)->dm; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + bool was_activated; + + spin_lock_irq(&drm_dev->event_lock); + was_activated = acrtc->dm_irq_params.window_param.activated; + acrtc->dm_irq_params.window_param.x_start = 0; + acrtc->dm_irq_params.window_param.y_start = 0; + acrtc->dm_irq_params.window_param.x_end = 0; + acrtc->dm_irq_params.window_param.y_end = 0; + acrtc->dm_irq_params.window_param.activated = false; + acrtc->dm_irq_params.window_param.update_win = false; + acrtc->dm_irq_params.window_param.skip_frame_cnt = 0; + 
spin_unlock_irq(&drm_dev->event_lock);
+
+ /* Disable secure_display if it was enabled */
+ if (was_activated) {
+ /* stop ROI update on this crtc */
+ flush_work(&dm->secure_display_ctxs[crtc->index].notify_ta_work);
+ flush_work(&dm->secure_display_ctxs[crtc->index].forward_roi_work);
+ dc_stream_forward_crc_window(stream, NULL, true);
+ }
+}
+
+static void amdgpu_dm_crtc_notify_ta_to_read(struct work_struct *work)
+{
+ struct secure_display_context *secure_display_ctx;
+ struct psp_context *psp;
+ struct ta_securedisplay_cmd *securedisplay_cmd;
+ struct drm_crtc *crtc;
+ struct dc_stream_state *stream;
+ uint8_t phy_inst;
+ int ret;
+
+ secure_display_ctx = container_of(work, struct secure_display_context, notify_ta_work);
+ crtc = secure_display_ctx->crtc;
+
+ if (!crtc)
+ return;
+
+ psp = &drm_to_adev(crtc->dev)->psp;
+
+ if (!psp->securedisplay_context.context.initialized) {
+ DRM_DEBUG_DRIVER("Secure Display fails to notify PSP TA\n");
+ return;
+ }
+
+ stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
+ phy_inst = stream->link->link_enc_hw_inst;
+
+ /* need lock for multiple crtcs to use the command buffer */
+ mutex_lock(&psp->securedisplay_context.mutex);
+
+ psp_prep_securedisplay_cmd_buf(psp, &securedisplay_cmd,
+ TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+
+ securedisplay_cmd->securedisplay_in_message.send_roi_crc.phy_id = phy_inst;
+
+ /* The PSP TA is expected to finish the data transmission over I2C within
+ * the current frame, even if up to 4 CRTCs request to send in this frame.
+ */
+ ret = psp_securedisplay_invoke(psp, TA_SECUREDISPLAY_COMMAND__SEND_ROI_CRC);
+
+ if (!ret) {
+ if (securedisplay_cmd->status != TA_SECUREDISPLAY_STATUS__SUCCESS)
+ psp_securedisplay_parse_resp_status(psp, securedisplay_cmd->status);
+ }
+
+ mutex_unlock(&psp->securedisplay_context.mutex);
+}
+
+static void
+amdgpu_dm_forward_crc_window(struct work_struct *work)
+{
+ struct secure_display_context *secure_display_ctx;
+ struct amdgpu_display_manager *dm;
+ struct drm_crtc *crtc;
+ struct dc_stream_state *stream;
+
+ secure_display_ctx = container_of(work, struct secure_display_context, forward_roi_work);
+ crtc = secure_display_ctx->crtc;
+
+ if (!crtc)
+ return;
+
+ dm = &drm_to_adev(crtc->dev)->dm;
+ stream = to_amdgpu_crtc(crtc)->dm_irq_params.stream;
+
+ mutex_lock(&dm->dc_lock);
+ dc_stream_forward_crc_window(stream, &secure_display_ctx->rect, false);
+ mutex_unlock(&dm->dc_lock);
+}
+
+bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc)
+{
+ struct drm_device *drm_dev = crtc->dev;
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ bool ret = false;
+
+ spin_lock_irq(&drm_dev->event_lock);
+ ret = acrtc->dm_irq_params.window_param.activated;
+ spin_unlock_irq(&drm_dev->event_lock);
+
+ return ret;
+}
+#endif
+
+int
+amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
+ size_t *values_cnt)
+{
+ enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
+
+ if (source < 0) {
+ DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
+ src_name, crtc->index);
+ return -EINVAL;
+ }
+
+ *values_cnt = 3;
+ return 0;
+}
+
+int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
+ struct dm_crtc_state *dm_crtc_state,
+ enum amdgpu_dm_pipe_crc_source source)
+{
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
+ struct dc_stream_state *stream_state = dm_crtc_state->stream;
+ bool enable = amdgpu_dm_is_valid_crc_source(source);
+ int ret = 0;
+
+ /* Without a stream there is nothing to configure here; CRC setup is
+ * handled once the stream is enabled.
*/ + if (!stream_state) + return -EINVAL; + + mutex_lock(&adev->dm.dc_lock); + + /* Enable or disable CRTC CRC generation */ + if (dm_is_crc_source_crtc(source) || source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE) { + if (!dc_stream_configure_crc(stream_state->ctx->dc, + stream_state, NULL, enable, enable)) { + ret = -EINVAL; + goto unlock; + } + } + + /* Configure dithering */ + if (!dm_need_crc_dither(source)) { + dc_stream_set_dither_option(stream_state, DITHER_OPTION_TRUN8); + dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state, + DYN_EXPANSION_DISABLE); + } else { + dc_stream_set_dither_option(stream_state, + DITHER_OPTION_DEFAULT); + dc_stream_set_dyn_expansion(stream_state->ctx->dc, stream_state, + DYN_EXPANSION_AUTO); + } + +unlock: + mutex_unlock(&adev->dm.dc_lock); + + return ret; +} + +int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name) +{ + enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name); + enum amdgpu_dm_pipe_crc_source cur_crc_src; + struct drm_crtc_commit *commit; + struct dm_crtc_state *crtc_state; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + struct drm_dp_aux *aux = NULL; + bool enable = false; + bool enabled = false; + int ret = 0; + + if (source < 0) { + DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n", + src_name, crtc->index); + return -EINVAL; + } + + ret = drm_modeset_lock(&crtc->mutex, NULL); + if (ret) + return ret; + + spin_lock(&crtc->commit_lock); + commit = list_first_entry_or_null(&crtc->commit_list, + struct drm_crtc_commit, commit_entry); + if (commit) + drm_crtc_commit_get(commit); + spin_unlock(&crtc->commit_lock); + + if (commit) { + /* + * Need to wait for all outstanding programming to complete + * in commit tail since it can modify CRC related fields and + * hardware state. Since we're holding the CRTC lock we're + * guaranteed that no other commit work can be queued off + * before we modify the state below. 
+ */
+ ret = wait_for_completion_interruptible_timeout(
+ &commit->hw_done, 10 * HZ);
+ if (ret < 0)
+ goto cleanup;
+ if (ret == 0) {
+ ret = -ETIMEDOUT;
+ goto cleanup;
+ }
+ /* A positive return is the remaining jiffies, not an error. */
+ ret = 0;
+ }
+
+ enable = amdgpu_dm_is_valid_crc_source(source);
+ crtc_state = to_dm_crtc_state(crtc->state);
+ spin_lock_irq(&drm_dev->event_lock);
+ cur_crc_src = acrtc->dm_irq_params.crc_src;
+ spin_unlock_irq(&drm_dev->event_lock);
+
+ /*
+ * USER REQ SRC | CURRENT SRC | BEHAVIOR
+ * -----------------------------
+ * None | None | Do nothing
+ * None | CRTC | Disable CRTC CRC, set default to dither
+ * None | DPRX | Disable DPRX CRC, need 'aux', set default to dither
+ * None | CRTC DITHER | Disable CRTC CRC
+ * None | DPRX DITHER | Disable DPRX CRC, need 'aux'
+ * CRTC | XXXX | Enable CRTC CRC, no dither
+ * DPRX | XXXX | Enable DPRX CRC, need 'aux', no dither
+ * CRTC DITHER | XXXX | Enable CRTC CRC, set dither
+ * DPRX DITHER | XXXX | Enable DPRX CRC, need 'aux', set dither
+ */
+ if (dm_is_crc_source_dprx(source) ||
+ (source == AMDGPU_DM_PIPE_CRC_SOURCE_NONE &&
+ dm_is_crc_source_dprx(cur_crc_src))) {
+ struct amdgpu_dm_connector *aconn = NULL;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(crtc->dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (!connector->state || connector->state->crtc != crtc)
+ continue;
+
+ aconn = to_amdgpu_dm_connector(connector);
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ if (!aconn) {
+ DRM_DEBUG_DRIVER("No amd connector matching CRTC-%d\n", crtc->index);
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ aux = (aconn->mst_output_port) ? &aconn->mst_output_port->aux : &aconn->dm_dp_aux.aux;
+
+ if (!aux) {
+ DRM_DEBUG_DRIVER("No dp aux for amd connector\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ if ((aconn->base.connector_type != DRM_MODE_CONNECTOR_DisplayPort) &&
+ (aconn->base.connector_type != DRM_MODE_CONNECTOR_eDP)) {
+ DRM_DEBUG_DRIVER("No DP connector available for CRC source\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+
+#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY)
+ /* Reset secure_display when we change crc source from debugfs */
+ amdgpu_dm_set_crc_window_default(crtc, crtc_state->stream);
+#endif
+
+ if (amdgpu_dm_crtc_configure_crc_source(crtc, crtc_state, source)) {
+ ret = -EINVAL;
+ goto cleanup;
+ }
+
+ /*
+ * Reading the CRC requires the vblank interrupt handler to be
+ * enabled. Keep a reference until CRC capture stops.
+ */
+ enabled = amdgpu_dm_is_valid_crc_source(cur_crc_src);
+ if (!enabled && enable) {
+ ret = drm_crtc_vblank_get(crtc);
+ if (ret)
+ goto cleanup;
+
+ if (dm_is_crc_source_dprx(source)) {
+ if (drm_dp_start_crc(aux, crtc)) {
+ DRM_DEBUG_DRIVER("dp start crc failed\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+ } else if (enabled && !enable) {
+ drm_crtc_vblank_put(crtc);
+ if (dm_is_crc_source_dprx(source)) {
+ if (drm_dp_stop_crc(aux)) {
+ DRM_DEBUG_DRIVER("dp stop crc failed\n");
+ ret = -EINVAL;
+ goto cleanup;
+ }
+ }
+ }
+
+ spin_lock_irq(&drm_dev->event_lock);
+ acrtc->dm_irq_params.crc_src = source;
+ spin_unlock_irq(&drm_dev->event_lock);
+
+ /* Reset crc_skipped on dm state */
+ crtc_state->crc_skip_count = 0;
+
+cleanup:
+ if (commit)
+ drm_crtc_commit_put(commit);
+
+ drm_modeset_unlock(&crtc->mutex);
+
+ return ret;
+}
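For orientation, the function above backs DRM's generic CRC debugfs ABI, so capture can be driven entirely from userspace. A hedged sketch (the paths assume the standard /sys/kernel/debug/dri layout on a kernel with CONFIG_DEBUG_FS; each line of the data file is one frame number followed by the CRC words, per DRM's CRC ABI):

#include <stdio.h>

int main(void)
{
	const char *control = "/sys/kernel/debug/dri/0/crtc-0/crc/control";
	const char *data = "/sys/kernel/debug/dri/0/crtc-0/crc/data";
	char line[128];
	FILE *f;
	int i;

	/* Select the CRTC as CRC source; writing "none" would stop capture. */
	f = fopen(control, "w");
	if (!f)
		return 1;
	fputs("crtc\n", f);
	fclose(f);

	/* Opening the data file starts capture; read a handful of frames. */
	f = fopen(data, "r");
	if (!f)
		return 1;
	for (i = 0; i < 10 && fgets(line, sizeof(line), f); i++)
		fputs(line, stdout);
	fclose(f);

	return 0;
}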
+ */ +void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc) +{ + struct dm_crtc_state *crtc_state; + struct dc_stream_state *stream_state; + struct drm_device *drm_dev = NULL; + enum amdgpu_dm_pipe_crc_source cur_crc_src; + struct amdgpu_crtc *acrtc = NULL; + uint32_t crcs[3]; + unsigned long flags; + + if (crtc == NULL) + return; + + crtc_state = to_dm_crtc_state(crtc->state); + stream_state = crtc_state->stream; + acrtc = to_amdgpu_crtc(crtc); + drm_dev = crtc->dev; + + spin_lock_irqsave(&drm_dev->event_lock, flags); + cur_crc_src = acrtc->dm_irq_params.crc_src; + spin_unlock_irqrestore(&drm_dev->event_lock, flags); + + /* Early return if CRC capture is not enabled. */ + if (!amdgpu_dm_is_valid_crc_source(cur_crc_src)) + return; + + /* + * Since flipping and crc enablement happen asynchronously, we - more + * often than not - will be returning an 'uncooked' crc on first frame. + * Probably because hw isn't ready yet. For added security, skip the + * first two CRC values. + */ + if (crtc_state->crc_skip_count < 2) { + crtc_state->crc_skip_count += 1; + return; + } + + if (dm_is_crc_source_crtc(cur_crc_src)) { + if (!dc_stream_get_crc(stream_state->ctx->dc, stream_state, + &crcs[0], &crcs[1], &crcs[2])) + return; + + drm_crtc_add_crc_entry(crtc, true, + drm_crtc_accurate_vblank_count(crtc), crcs); + } +} + +#if defined(CONFIG_DRM_AMD_SECURE_DISPLAY) +void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc) +{ + struct drm_device *drm_dev = NULL; + enum amdgpu_dm_pipe_crc_source cur_crc_src; + struct amdgpu_crtc *acrtc = NULL; + struct amdgpu_device *adev = NULL; + struct secure_display_context *secure_display_ctx = NULL; + unsigned long flags1; + + if (crtc == NULL) + return; + + acrtc = to_amdgpu_crtc(crtc); + adev = drm_to_adev(crtc->dev); + drm_dev = crtc->dev; + + spin_lock_irqsave(&drm_dev->event_lock, flags1); + cur_crc_src = acrtc->dm_irq_params.crc_src; + + /* Early return if CRC capture is not enabled. */ + if (!amdgpu_dm_is_valid_crc_source(cur_crc_src) || + !dm_is_crc_source_crtc(cur_crc_src)) + goto cleanup; + + if (!acrtc->dm_irq_params.window_param.activated) + goto cleanup; + + if (acrtc->dm_irq_params.window_param.skip_frame_cnt) { + acrtc->dm_irq_params.window_param.skip_frame_cnt -= 1; + goto cleanup; + } + + secure_display_ctx = &adev->dm.secure_display_ctxs[acrtc->crtc_id]; + if (WARN_ON(secure_display_ctx->crtc != crtc)) { + /* We have set the crtc when creating secure_display_context, + * don't expect it to be changed here. + */ + secure_display_ctx->crtc = crtc; + } + + if (acrtc->dm_irq_params.window_param.update_win) { + /* prepare work for dmub to update ROI */ + secure_display_ctx->rect.x = acrtc->dm_irq_params.window_param.x_start; + secure_display_ctx->rect.y = acrtc->dm_irq_params.window_param.y_start; + secure_display_ctx->rect.width = acrtc->dm_irq_params.window_param.x_end - + acrtc->dm_irq_params.window_param.x_start; + secure_display_ctx->rect.height = acrtc->dm_irq_params.window_param.y_end - + acrtc->dm_irq_params.window_param.y_start; + schedule_work(&secure_display_ctx->forward_roi_work); + + acrtc->dm_irq_params.window_param.update_win = false; + + /* Statically skip 1 frame, because we may need to wait below things + * before sending ROI to dmub: + * 1. We defer the work by using system workqueue. + * 2. We may need to wait for dc_lock before accessing dmub. 
+ */ + acrtc->dm_irq_params.window_param.skip_frame_cnt = 1; + + } else { + /* prepare work for psp to read ROI/CRC and send to I2C */ + schedule_work(&secure_display_ctx->notify_ta_work); + } + +cleanup: + spin_unlock_irqrestore(&drm_dev->event_lock, flags1); +} + +struct secure_display_context * +amdgpu_dm_crtc_secure_display_create_contexts(struct amdgpu_device *adev) +{ + struct secure_display_context *secure_display_ctxs = NULL; + int i; + + secure_display_ctxs = kcalloc(adev->mode_info.num_crtc, + sizeof(struct secure_display_context), + GFP_KERNEL); + + if (!secure_display_ctxs) + return NULL; + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + INIT_WORK(&secure_display_ctxs[i].forward_roi_work, amdgpu_dm_forward_crc_window); + INIT_WORK(&secure_display_ctxs[i].notify_ta_work, amdgpu_dm_crtc_notify_ta_to_read); + secure_display_ctxs[i].crtc = &adev->mode_info.crtcs[i]->base; + } + + return secure_display_ctxs; +} +#endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h new file mode 100644 index 0000000000..748e80ef40 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.h @@ -0,0 +1,106 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_
+#define AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_
+
+struct drm_crtc;
+struct dm_crtc_state;
+
+enum amdgpu_dm_pipe_crc_source {
+ AMDGPU_DM_PIPE_CRC_SOURCE_NONE = 0,
+ AMDGPU_DM_PIPE_CRC_SOURCE_CRTC,
+ AMDGPU_DM_PIPE_CRC_SOURCE_CRTC_DITHER,
+ AMDGPU_DM_PIPE_CRC_SOURCE_DPRX,
+ AMDGPU_DM_PIPE_CRC_SOURCE_DPRX_DITHER,
+ AMDGPU_DM_PIPE_CRC_SOURCE_MAX,
+ AMDGPU_DM_PIPE_CRC_SOURCE_INVALID = -1,
+};
+
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+struct crc_window_param {
+ uint16_t x_start;
+ uint16_t y_start;
+ uint16_t x_end;
+ uint16_t y_end;
+ /* Whether the CRC window is activated */
+ bool activated;
+ /* Whether to update the CRC window during vertical blank */
+ bool update_win;
+ /* Skip reading/writing for a few frames */
+ int skip_frame_cnt;
+};
+
+struct secure_display_context {
+ /* Work to notify PSP TA */
+ struct work_struct notify_ta_work;
+
+ /* Work to forward the ROI to dmcu/dmub */
+ struct work_struct forward_roi_work;
+
+ struct drm_crtc *crtc;
+
+ /* Region of Interest (ROI) */
+ struct rect rect;
+};
+#endif
+
+static inline bool amdgpu_dm_is_valid_crc_source(enum amdgpu_dm_pipe_crc_source source)
+{
+ return (source > AMDGPU_DM_PIPE_CRC_SOURCE_NONE) &&
+ (source < AMDGPU_DM_PIPE_CRC_SOURCE_MAX);
+}
+
+/* amdgpu_dm_crc.c */
+#ifdef CONFIG_DEBUG_FS
+int amdgpu_dm_crtc_configure_crc_source(struct drm_crtc *crtc,
+ struct dm_crtc_state *dm_crtc_state,
+ enum amdgpu_dm_pipe_crc_source source);
+int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name);
+int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *src_name,
+ size_t *values_cnt);
+const char *const *amdgpu_dm_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count);
+void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
+#else
+#define amdgpu_dm_crtc_set_crc_source NULL
+#define amdgpu_dm_crtc_verify_crc_source NULL
+#define amdgpu_dm_crtc_get_crc_sources NULL
+#define amdgpu_dm_crtc_handle_crc_irq(x)
+#endif
+
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+bool amdgpu_dm_crc_window_is_activated(struct drm_crtc *crtc);
+void amdgpu_dm_crtc_handle_crc_window_irq(struct drm_crtc *crtc);
+struct secure_display_context *amdgpu_dm_crtc_secure_display_create_contexts(
+ struct amdgpu_device *adev);
+#else
+#define amdgpu_dm_crc_window_is_activated(x)
+#define amdgpu_dm_crtc_handle_crc_window_irq(x)
+#define amdgpu_dm_crtc_secure_display_create_contexts(x)
+#endif
+
+#endif /* AMD_DAL_DEV_AMDGPU_DM_AMDGPU_DM_CRC_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
new file mode 100644
index 0000000000..97b7a0b8a1
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include <drm/drm_vblank.h> +#include <drm/drm_atomic_helper.h> + +#include "dc.h" +#include "amdgpu.h" +#include "amdgpu_dm_psr.h" +#include "amdgpu_dm_replay.h" +#include "amdgpu_dm_crtc.h" +#include "amdgpu_dm_plane.h" +#include "amdgpu_dm_trace.h" +#include "amdgpu_dm_debugfs.h" + +void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc) +{ + struct drm_crtc *crtc = &acrtc->base; + struct drm_device *dev = crtc->dev; + unsigned long flags; + + drm_crtc_handle_vblank(crtc); + + spin_lock_irqsave(&dev->event_lock, flags); + + /* Send completion event for cursor-only commits */ + if (acrtc->event && acrtc->pflip_status != AMDGPU_FLIP_SUBMITTED) { + drm_crtc_send_vblank_event(crtc, acrtc->event); + drm_crtc_vblank_put(crtc); + acrtc->event = NULL; + } + + spin_unlock_irqrestore(&dev->event_lock, flags); +} + +bool amdgpu_dm_crtc_modeset_required(struct drm_crtc_state *crtc_state, + struct dc_stream_state *new_stream, + struct dc_stream_state *old_stream) +{ + return crtc_state->active && drm_atomic_crtc_needs_modeset(crtc_state); +} + +bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc) + +{ + return acrtc->dm_irq_params.freesync_config.state == + VRR_STATE_ACTIVE_VARIABLE || + acrtc->dm_irq_params.freesync_config.state == + VRR_STATE_ACTIVE_FIXED; +} + +int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable) +{ + enum dc_irq_source irq_source; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + struct amdgpu_device *adev = drm_to_adev(crtc->dev); + int rc; + + if (acrtc->otg_inst == -1) + return 0; + + irq_source = IRQ_TYPE_VUPDATE + acrtc->otg_inst; + + rc = dc_interrupt_set(adev->dm.dc, irq_source, enable) ? 0 : -EBUSY; + + DRM_DEBUG_VBL("crtc %d - vupdate irq %sabling: r=%d\n", + acrtc->crtc_id, enable ? "en" : "dis", rc); + return rc; +} + +bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state) +{ + return dm_state->freesync_config.state == VRR_STATE_ACTIVE_VARIABLE || + dm_state->freesync_config.state == VRR_STATE_ACTIVE_FIXED; +} + +static void vblank_control_worker(struct work_struct *work) +{ + struct vblank_control_work *vblank_work = + container_of(work, struct vblank_control_work, work); + struct amdgpu_display_manager *dm = vblank_work->dm; + + mutex_lock(&dm->dc_lock); + + if (vblank_work->enable) + dm->active_vblank_irq_count++; + else if (dm->active_vblank_irq_count) + dm->active_vblank_irq_count--; + + dc_allow_idle_optimizations(dm->dc, dm->active_vblank_irq_count == 0); + + DRM_DEBUG_KMS("Allow idle optimizations (MALL): %d\n", dm->active_vblank_irq_count == 0); + + /* + * Control PSR based on vblank requirements from OS + * + * If panel supports PSR SU, there's no need to disable PSR when OS is + * submitting fast atomic commits (we infer this by whether the OS + * requests vblank events). Fast atomic commits will simply trigger a + * full-frame-update (FFU); a specific case of selective-update (SU) + * where the SU region is the full hactive*vactive region. See + * fill_dc_dirty_rects(). 
+ */
+ if (vblank_work->stream && vblank_work->stream->link) {
+ /* Prioritize Replay over PSR. */
+ if (vblank_work->stream->link->replay_settings.replay_feature_enabled)
+ amdgpu_dm_replay_enable(vblank_work->stream, false);
+ else if (vblank_work->enable) {
+ if (vblank_work->stream->link->psr_settings.psr_version < DC_PSR_VERSION_SU_1 &&
+ vblank_work->stream->link->psr_settings.psr_allow_active)
+ amdgpu_dm_psr_disable(vblank_work->stream);
+ } else if (vblank_work->stream->link->psr_settings.psr_feature_enabled &&
+ !vblank_work->stream->link->psr_settings.psr_allow_active &&
+#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY
+ !amdgpu_dm_crc_window_is_activated(&vblank_work->acrtc->base) &&
+#endif
+ vblank_work->stream->link->panel_config.psr.disallow_replay &&
+ vblank_work->acrtc->dm_irq_params.allow_psr_entry) {
+ amdgpu_dm_psr_enable(vblank_work->stream);
+ }
+ }
+
+ mutex_unlock(&dm->dc_lock);
+
+ dc_stream_release(vblank_work->stream);
+
+ kfree(vblank_work);
+}
+
+static inline int dm_set_vblank(struct drm_crtc *crtc, bool enable)
+{
+ struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc);
+ struct amdgpu_device *adev = drm_to_adev(crtc->dev);
+ struct dm_crtc_state *acrtc_state = to_dm_crtc_state(crtc->state);
+ struct amdgpu_display_manager *dm = &adev->dm;
+ struct vblank_control_work *work;
+ int rc = 0;
+
+ if (acrtc->otg_inst == -1)
+ goto skip;
+
+ if (enable) {
+ /* vblank irq on -> Only need vupdate irq in vrr mode */
+ if (amdgpu_dm_crtc_vrr_active(acrtc_state))
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, true);
+ } else {
+ /* vblank irq off -> vupdate irq off */
+ rc = amdgpu_dm_crtc_set_vupdate_irq(crtc, false);
+ }
+
+ if (rc)
+ return rc;
+
+ rc = (enable)
+ ? amdgpu_irq_get(adev, &adev->crtc_irq, acrtc->crtc_id)
+ : amdgpu_irq_put(adev, &adev->crtc_irq, acrtc->crtc_id);
+
+ if (rc)
+ return rc;
+
+skip:
+ if (amdgpu_in_reset(adev))
+ return 0;
+
+ if (dm->vblank_control_workqueue) {
+ work = kzalloc(sizeof(*work), GFP_ATOMIC);
+ if (!work)
+ return -ENOMEM;
+
+ INIT_WORK(&work->work, vblank_control_worker);
+ work->dm = dm;
+ work->acrtc = acrtc;
+ work->enable = enable;
+
+ if (acrtc_state->stream) {
+ dc_stream_retain(acrtc_state->stream);
+ work->stream = acrtc_state->stream;
+ }
+
+ queue_work(dm->vblank_control_workqueue, &work->work);
+ }
+
+ return 0;
+}
+
+int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ return dm_set_vblank(crtc, true);
+}
+
+void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ dm_set_vblank(crtc, false);
+}
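The enable_vblank/disable_vblank hooks above are what DRM invokes when userspace starts or stops listening for vblanks. A hedged userspace sketch that exercises this path via libdrm (assumes /dev/dri/card0 exists; error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>

int main(void)
{
	drmVBlank vbl;
	int fd = open("/dev/dri/card0", O_RDWR);

	if (fd < 0)
		return 1;

	/* Block until the next vblank on the first CRTC; while a waiter
	 * exists the driver keeps the vblank IRQ enabled via the hooks
	 * above, and releases the reference again afterwards. */
	memset(&vbl, 0, sizeof(vbl));
	vbl.request.type = DRM_VBLANK_RELATIVE;
	vbl.request.sequence = 1;

	if (drmWaitVBlank(fd, &vbl) == 0)
		printf("vblank #%u at %ld.%06lds\n", vbl.reply.sequence,
		       vbl.reply.tval_sec, vbl.reply.tval_usec);
	return 0;
}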
+
+static void dm_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dm_crtc_state *cur = to_dm_crtc_state(state);
+
+ /* TODO: Destroy the dc_stream here once the stream object is flattened. */
+ if (cur->stream)
+ dc_stream_release(cur->stream);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ kfree(state);
+}
+
+static struct drm_crtc_state *dm_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct dm_crtc_state *state, *cur;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ cur = to_dm_crtc_state(crtc->state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ if (cur->stream) {
+ state->stream = cur->stream;
+ dc_stream_retain(state->stream);
+ }
+
+ state->active_planes = cur->active_planes;
+ state->vrr_infopacket = cur->vrr_infopacket;
+ state->abm_level = cur->abm_level;
+ state->vrr_supported = cur->vrr_supported;
+ state->freesync_config = cur->freesync_config;
+ state->cm_has_degamma = cur->cm_has_degamma;
+ state->cm_is_degamma_srgb = cur->cm_is_degamma_srgb;
+ state->crc_skip_count = cur->crc_skip_count;
+ state->mpo_requested = cur->mpo_requested;
+ /* TODO: Duplicate the dc_stream here once the stream object is flattened. */
+
+ return &state->base;
+}
+
+static void amdgpu_dm_crtc_destroy(struct drm_crtc *crtc)
+{
+ drm_crtc_cleanup(crtc);
+ kfree(crtc);
+}
+
+static void dm_crtc_reset_state(struct drm_crtc *crtc)
+{
+ struct dm_crtc_state *state;
+
+ if (crtc->state)
+ dm_crtc_destroy_state(crtc, crtc->state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (WARN_ON(!state))
+ return;
+
+ __drm_atomic_helper_crtc_reset(crtc, &state->base);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int amdgpu_dm_crtc_late_register(struct drm_crtc *crtc)
+{
+ crtc_debugfs_init(crtc);
+
+ return 0;
+}
+#endif
+
+/* Implemented only the options currently available for the driver */
+static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
+ .reset = dm_crtc_reset_state,
+ .destroy = amdgpu_dm_crtc_destroy,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = dm_crtc_duplicate_state,
+ .atomic_destroy_state = dm_crtc_destroy_state,
+ .set_crc_source = amdgpu_dm_crtc_set_crc_source,
+ .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
+ .get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_dm_crtc_enable_vblank,
+ .disable_vblank = amdgpu_dm_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
+#if defined(CONFIG_DEBUG_FS)
+ .late_register = amdgpu_dm_crtc_late_register,
+#endif
+};
+
+static void dm_crtc_helper_disable(struct drm_crtc *crtc)
+{
+}
+
+static int count_crtc_active_planes(struct drm_crtc_state *new_crtc_state)
+{
+ struct drm_atomic_state *state = new_crtc_state->state;
+ struct drm_plane *plane;
+ int num_active = 0;
+
+ drm_for_each_plane_mask(plane, state->dev, new_crtc_state->plane_mask) {
+ struct drm_plane_state *new_plane_state;
+
+ /* Cursor planes are "fake". */
+ if (plane->type == DRM_PLANE_TYPE_CURSOR)
+ continue;
+
+ new_plane_state = drm_atomic_get_new_plane_state(state, plane);
+
+ if (!new_plane_state) {
+ /*
+ * The plane is enabled on the CRTC and hasn't changed
+ * state. This means that it previously passed
+ * validation and is therefore enabled.
+ */
+ num_active += 1;
+ continue;
+ }
+
+ /* We need a framebuffer to be considered enabled.
*/ + num_active += (new_plane_state->fb != NULL); + } + + return num_active; +} + +static void dm_update_crtc_active_planes(struct drm_crtc *crtc, + struct drm_crtc_state *new_crtc_state) +{ + struct dm_crtc_state *dm_new_crtc_state = + to_dm_crtc_state(new_crtc_state); + + dm_new_crtc_state->active_planes = 0; + + if (!dm_new_crtc_state->stream) + return; + + dm_new_crtc_state->active_planes = + count_crtc_active_planes(new_crtc_state); +} + +static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc, + const struct drm_display_mode *mode, + struct drm_display_mode *adjusted_mode) +{ + return true; +} + +static int dm_crtc_helper_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, + crtc); + struct amdgpu_device *adev = drm_to_adev(crtc->dev); + struct dc *dc = adev->dm.dc; + struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(crtc_state); + int ret = -EINVAL; + + trace_amdgpu_dm_crtc_atomic_check(crtc_state); + + dm_update_crtc_active_planes(crtc, crtc_state); + + if (WARN_ON(unlikely(!dm_crtc_state->stream && + amdgpu_dm_crtc_modeset_required(crtc_state, NULL, dm_crtc_state->stream)))) { + return ret; + } + + /* + * We require the primary plane to be enabled whenever the CRTC is, otherwise + * drm_mode_cursor_universal may end up trying to enable the cursor plane while all other + * planes are disabled, which is not supported by the hardware. And there is legacy + * userspace which stops using the HW cursor altogether in response to the resulting EINVAL. + */ + if (crtc_state->enable && + !(crtc_state->plane_mask & drm_plane_mask(crtc->primary))) { + DRM_DEBUG_ATOMIC("Can't enable a CRTC without enabling the primary plane\n"); + return -EINVAL; + } + + /* + * Only allow async flips for fast updates that don't change the FB + * pitch, the DCC state, rotation, etc. 
+ */ + if (crtc_state->async_flip && + dm_crtc_state->update_type != UPDATE_TYPE_FAST) { + drm_dbg_atomic(crtc->dev, + "[CRTC:%d:%s] async flips are only supported for fast updates\n", + crtc->base.id, crtc->name); + return -EINVAL; + } + + /* In some use cases, like reset, no stream is attached */ + if (!dm_crtc_state->stream) + return 0; + + if (dc_validate_stream(dc, dm_crtc_state->stream) == DC_OK) + return 0; + + DRM_DEBUG_ATOMIC("Failed DC stream validation\n"); + return ret; +} + +static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = { + .disable = dm_crtc_helper_disable, + .atomic_check = dm_crtc_helper_atomic_check, + .mode_fixup = dm_crtc_helper_mode_fixup, + .get_scanout_position = amdgpu_crtc_get_scanout_position, +}; + +int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, + struct drm_plane *plane, + uint32_t crtc_index) +{ + struct amdgpu_crtc *acrtc = NULL; + struct drm_plane *cursor_plane; + bool is_dcn; + int res = -ENOMEM; + + cursor_plane = kzalloc(sizeof(*cursor_plane), GFP_KERNEL); + if (!cursor_plane) + goto fail; + + cursor_plane->type = DRM_PLANE_TYPE_CURSOR; + res = amdgpu_dm_plane_init(dm, cursor_plane, 0, NULL); + + acrtc = kzalloc(sizeof(struct amdgpu_crtc), GFP_KERNEL); + if (!acrtc) + goto fail; + + res = drm_crtc_init_with_planes( + dm->ddev, + &acrtc->base, + plane, + cursor_plane, + &amdgpu_dm_crtc_funcs, NULL); + + if (res) + goto fail; + + drm_crtc_helper_add(&acrtc->base, &amdgpu_dm_crtc_helper_funcs); + + /* Create (reset) the plane state */ + if (acrtc->base.funcs->reset) + acrtc->base.funcs->reset(&acrtc->base); + + acrtc->max_cursor_width = dm->adev->dm.dc->caps.max_cursor_size; + acrtc->max_cursor_height = dm->adev->dm.dc->caps.max_cursor_size; + + acrtc->crtc_id = crtc_index; + acrtc->base.enabled = false; + acrtc->otg_inst = -1; + + dm->adev->mode_info.crtcs[crtc_index] = acrtc; + + /* Don't enable DRM CRTC degamma property for DCE since it doesn't + * support programmable degamma anywhere. + */ + is_dcn = dm->adev->dm.dc->caps.color.dpp.dcn_arch; + drm_crtc_enable_color_mgmt(&acrtc->base, is_dcn ? MAX_COLOR_LUT_ENTRIES : 0, + true, MAX_COLOR_LUT_ENTRIES); + + drm_mode_crtc_set_gamma_size(&acrtc->base, MAX_COLOR_LEGACY_LUT_ENTRIES); + + return 0; + +fail: + kfree(acrtc); + kfree(cursor_plane); + return res; +} + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h new file mode 100644 index 0000000000..17e948753f --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crtc.h @@ -0,0 +1,51 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_CRTC_H__ +#define __AMDGPU_DM_CRTC_H__ + +void amdgpu_dm_crtc_handle_vblank(struct amdgpu_crtc *acrtc); + +bool amdgpu_dm_crtc_modeset_required(struct drm_crtc_state *crtc_state, + struct dc_stream_state *new_stream, + struct dc_stream_state *old_stream); + +int amdgpu_dm_crtc_set_vupdate_irq(struct drm_crtc *crtc, bool enable); + +bool amdgpu_dm_crtc_vrr_active_irq(struct amdgpu_crtc *acrtc); + +bool amdgpu_dm_crtc_vrr_active(struct dm_crtc_state *dm_state); + +int amdgpu_dm_crtc_enable_vblank(struct drm_crtc *crtc); + +void amdgpu_dm_crtc_disable_vblank(struct drm_crtc *crtc); + +int amdgpu_dm_crtc_init(struct amdgpu_display_manager *dm, + struct drm_plane *plane, + uint32_t link_index); + +#endif + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c new file mode 100644 index 0000000000..7c21e21bcc --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -0,0 +1,3835 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <linux/string_helpers.h> +#include <linux/uaccess.h> + +#include "dc.h" +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "amdgpu_dm_debugfs.h" +#include "dm_helpers.h" +#include "dmub/dmub_srv.h" +#include "resource.h" +#include "dsc.h" +#include "link_hwss.h" +#include "dc/dc_dmub_srv.h" +#include "link/protocols/link_dp_capability.h" + +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +#include "amdgpu_dm_psr.h" +#endif + +struct dmub_debugfs_trace_header { + uint32_t entry_count; + uint32_t reserved[3]; +}; + +struct dmub_debugfs_trace_entry { + uint32_t trace_code; + uint32_t tick_count; + uint32_t param0; + uint32_t param1; +}; + +static const char *const mst_progress_status[] = { + "probe", + "remote_edid", + "allocate_new_payload", + "clear_allocated_payload", +}; + +/* parse_write_buffer_into_params - Helper function to parse debugfs write buffer into an array + * + * Function takes in attributes passed to debugfs write entry + * and writes into param array. 
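+ * (each token is converted with kstrtol(..., 16, ...), i.e. values are
+ * interpreted as hex; for example, a write of "2 0xa" is expected to yield
+ * param[0] == 0x2 and param[1] == 0xa)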
+ * The user passes max_param_num to identify the maximum number of
+ * parameters that can be parsed.
+ *
+ */
+static int parse_write_buffer_into_params(char *wr_buf, uint32_t wr_buf_size,
+ long *param, const char __user *buf,
+ int max_param_num,
+ uint8_t *param_nums)
+{
+ char *wr_buf_ptr = NULL;
+ uint32_t wr_buf_count = 0;
+ int r;
+ char *sub_str = NULL;
+ const char delimiter[3] = {' ', '\n', '\0'};
+ uint8_t param_index = 0;
+
+ *param_nums = 0;
+
+ wr_buf_ptr = wr_buf;
+
+ /* copy_from_user() returns the number of bytes that could not be copied */
+ if (copy_from_user(wr_buf_ptr, buf, wr_buf_size)) {
+ DRM_DEBUG_DRIVER("user data could not be read successfully\n");
+ return -EFAULT;
+ }
+
+ /* count the parameters; isspace() cannot distinguish a space from '\n' */
+ while ((*wr_buf_ptr != 0xa) && (wr_buf_count < wr_buf_size)) {
+ /* skip spaces */
+ while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ if (wr_buf_count == wr_buf_size)
+ break;
+
+ /* skip non-space */
+ while ((!isspace(*wr_buf_ptr)) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ (*param_nums)++;
+
+ if (wr_buf_count == wr_buf_size)
+ break;
+ }
+
+ if (*param_nums > max_param_num)
+ *param_nums = max_param_num;
+
+ wr_buf_ptr = wr_buf; /* reset buf pointer */
+ wr_buf_count = 0; /* number of chars already checked */
+
+ while (isspace(*wr_buf_ptr) && (wr_buf_count < wr_buf_size)) {
+ wr_buf_ptr++;
+ wr_buf_count++;
+ }
+
+ while (param_index < *param_nums) {
+ /* after strsep, wr_buf_ptr will be moved to after the space */
+ sub_str = strsep(&wr_buf_ptr, delimiter);
+
+ r = kstrtol(sub_str, 16, &(param[param_index]));
+
+ if (r)
+ DRM_DEBUG_DRIVER("string to int convert error code: %d\n", r);
+
+ param_index++;
+ }
+
+ return 0;
+}
+
+/* function description
+ * get/set DP configuration: lane_count, link_rate, spread_spectrum
+ *
+ * valid lane count values: 1, 2, 4
+ * valid link rate values:
+ * 06h = 1.62Gbps per lane
+ * 0Ah = 2.7Gbps per lane
+ * 0Ch = 3.24Gbps per lane
+ * 14h = 5.4Gbps per lane
+ * 1Eh = 8.1Gbps per lane
+ *
+ * debugfs is located at /sys/kernel/debug/dri/0/DP-x/link_settings
+ *
+ * --- to get dp configuration
+ *
+ * cat /sys/kernel/debug/dri/0/DP-x/link_settings
+ *
+ * It will list the current, verified, reported and preferred dp configuration.
+ * current -- for the current video mode
+ * verified --- maximum configuration which passes link training
+ * reported --- DP rx reported caps (DPCD register offsets 0, 1, 2)
+ * preferred --- user forced settings
+ *
+ * --- set (or force) dp configuration
+ *
+ * echo <lane_count> <link_rate> > link_settings
+ *
+ * for example, to force 2 lanes at 2.7Gbps:
+ * echo 2 0xa > /sys/kernel/debug/dri/0/DP-x/link_settings
+ *
+ * spread_spectrum cannot be changed dynamically.
+ *
+ * if an invalid lane count or link rate is forced, no hw programming will be
+ * done. Check the link settings after the force operation to see whether the
+ * HW was programmed:
+ *
+ * cat /sys/kernel/debug/dri/0/DP-x/link_settings
+ *
+ * check current and preferred settings. 
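+ *
+ * e.g. a minimal round trip (assuming the panel enumerates as DP-1):
+ * cat /sys/kernel/debug/dri/0/DP-1/link_settings
+ * echo 2 0xa > /sys/kernel/debug/dri/0/DP-1/link_settings
+ * cat /sys/kernel/debug/dri/0/DP-1/link_settings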
+ * + */ +static ssize_t dp_link_settings_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *connector = file_inode(f)->i_private; + struct dc_link *link = connector->dc_link; + char *rd_buf = NULL; + char *rd_buf_ptr = NULL; + const uint32_t rd_buf_size = 100; + uint32_t result = 0; + uint8_t str_len = 0; + int r; + + if (*pos & 3 || size & 3) + return -EINVAL; + + rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); + if (!rd_buf) + return 0; + + rd_buf_ptr = rd_buf; + + str_len = strlen("Current: %d 0x%x %d "); + snprintf(rd_buf_ptr, str_len, "Current: %d 0x%x %d ", + link->cur_link_settings.lane_count, + link->cur_link_settings.link_rate, + link->cur_link_settings.link_spread); + rd_buf_ptr += str_len; + + str_len = strlen("Verified: %d 0x%x %d "); + snprintf(rd_buf_ptr, str_len, "Verified: %d 0x%x %d ", + link->verified_link_cap.lane_count, + link->verified_link_cap.link_rate, + link->verified_link_cap.link_spread); + rd_buf_ptr += str_len; + + str_len = strlen("Reported: %d 0x%x %d "); + snprintf(rd_buf_ptr, str_len, "Reported: %d 0x%x %d ", + link->reported_link_cap.lane_count, + link->reported_link_cap.link_rate, + link->reported_link_cap.link_spread); + rd_buf_ptr += str_len; + + str_len = strlen("Preferred: %d 0x%x %d "); + snprintf(rd_buf_ptr, str_len, "Preferred: %d 0x%x %d\n", + link->preferred_link_setting.lane_count, + link->preferred_link_setting.link_rate, + link->preferred_link_setting.link_spread); + + while (size) { + if (*pos >= rd_buf_size) + break; + + r = put_user(*(rd_buf + result), buf); + if (r) { + kfree(rd_buf); + return r; /* r = -EFAULT */ + } + + buf += 1; + size -= 1; + *pos += 1; + result += 1; + } + + kfree(rd_buf); + return result; +} + +static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *connector = file_inode(f)->i_private; + struct dc_link *link = connector->dc_link; + struct amdgpu_device *adev = drm_to_adev(connector->base.dev); + struct dc *dc = (struct dc *)link->dc; + struct dc_link_settings prefer_link_settings; + char *wr_buf = NULL; + const uint32_t wr_buf_size = 40; + /* 0: lane_count; 1: link_rate */ + int max_param_num = 2; + uint8_t param_nums = 0; + long param[2]; + bool valid_input = true; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + if (!wr_buf) + return -ENOSPC; + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + max_param_num, + ¶m_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + kfree(wr_buf); + DRM_DEBUG_DRIVER("user data not be read\n"); + return -EINVAL; + } + + switch (param[0]) { + case LANE_COUNT_ONE: + case LANE_COUNT_TWO: + case LANE_COUNT_FOUR: + break; + default: + valid_input = false; + break; + } + + switch (param[1]) { + case LINK_RATE_LOW: + case LINK_RATE_HIGH: + case LINK_RATE_RBR2: + case LINK_RATE_HIGH2: + case LINK_RATE_HIGH3: + case LINK_RATE_UHBR10: + case LINK_RATE_UHBR13_5: + case LINK_RATE_UHBR20: + break; + default: + valid_input = false; + break; + } + + if (!valid_input) { + kfree(wr_buf); + DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n"); + mutex_lock(&adev->dm.dc_lock); + dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false); + mutex_unlock(&adev->dm.dc_lock); + return size; + } + + /* save user force lane_count, link_rate to preferred settings + * spread spectrum will not be changed + */ + 
prefer_link_settings.link_spread = link->cur_link_settings.link_spread; + prefer_link_settings.use_link_rate_set = false; + prefer_link_settings.lane_count = param[0]; + prefer_link_settings.link_rate = param[1]; + + mutex_lock(&adev->dm.dc_lock); + dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, false); + mutex_unlock(&adev->dm.dc_lock); + + kfree(wr_buf); + return size; +} + +static bool dp_mst_is_end_device(struct amdgpu_dm_connector *aconnector) +{ + bool is_end_device = false; + struct drm_dp_mst_topology_mgr *mgr = NULL; + struct drm_dp_mst_port *port = NULL; + + if (aconnector->mst_root && aconnector->mst_root->mst_mgr.mst_state) { + mgr = &aconnector->mst_root->mst_mgr; + port = aconnector->mst_output_port; + + drm_modeset_lock(&mgr->base.lock, NULL); + if (port->pdt == DP_PEER_DEVICE_SST_SINK || + port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) + is_end_device = true; + drm_modeset_unlock(&mgr->base.lock); + } + + return is_end_device; +} + +/* Change MST link setting + * + * valid lane count value: 1, 2, 4 + * valid link rate value: + * 06h = 1.62Gbps per lane + * 0Ah = 2.7Gbps per lane + * 0Ch = 3.24Gbps per lane + * 14h = 5.4Gbps per lane + * 1Eh = 8.1Gbps per lane + * 3E8h = 10.0Gbps per lane + * 546h = 13.5Gbps per lane + * 7D0h = 20.0Gbps per lane + * + * debugfs is located at /sys/kernel/debug/dri/0/DP-x/mst_link_settings + * + * for example, to force to 2 lane, 10.0GHz, + * echo 2 0x3e8 > /sys/kernel/debug/dri/0/DP-x/mst_link_settings + * + * Valid input will trigger hotplug event to get new link setting applied + * Invalid input will trigger training setting reset + * + * The usage can be referred to link_settings entry + * + */ +static ssize_t dp_mst_link_setting(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct dc_link *link = aconnector->dc_link; + struct amdgpu_device *adev = drm_to_adev(aconnector->base.dev); + struct dc *dc = (struct dc *)link->dc; + struct dc_link_settings prefer_link_settings; + char *wr_buf = NULL; + const uint32_t wr_buf_size = 40; + /* 0: lane_count; 1: link_rate */ + int max_param_num = 2; + uint8_t param_nums = 0; + long param[2]; + bool valid_input = true; + + if (!dp_mst_is_end_device(aconnector)) + return -EINVAL; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + if (!wr_buf) + return -ENOSPC; + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + max_param_num, + ¶m_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + kfree(wr_buf); + DRM_DEBUG_DRIVER("user data not be read\n"); + return -EINVAL; + } + + switch (param[0]) { + case LANE_COUNT_ONE: + case LANE_COUNT_TWO: + case LANE_COUNT_FOUR: + break; + default: + valid_input = false; + break; + } + + switch (param[1]) { + case LINK_RATE_LOW: + case LINK_RATE_HIGH: + case LINK_RATE_RBR2: + case LINK_RATE_HIGH2: + case LINK_RATE_HIGH3: + case LINK_RATE_UHBR10: + case LINK_RATE_UHBR13_5: + case LINK_RATE_UHBR20: + break; + default: + valid_input = false; + break; + } + + if (!valid_input) { + kfree(wr_buf); + DRM_DEBUG_DRIVER("Invalid Input value No HW will be programmed\n"); + mutex_lock(&adev->dm.dc_lock); + dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false); + mutex_unlock(&adev->dm.dc_lock); + return -EINVAL; + } + + /* save user force lane_count, link_rate to preferred settings + * spread spectrum will not be changed + */ + 
prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
+ prefer_link_settings.use_link_rate_set = false;
+ prefer_link_settings.lane_count = param[0];
+ prefer_link_settings.link_rate = param[1];
+
+ /* skip immediate retrain, and train to new link setting after hotplug event triggered */
+ mutex_lock(&adev->dm.dc_lock);
+ dc_link_set_preferred_training_settings(dc, &prefer_link_settings, NULL, link, true);
+ mutex_unlock(&adev->dm.dc_lock);
+
+ mutex_lock(&aconnector->base.dev->mode_config.mutex);
+ aconnector->base.force = DRM_FORCE_OFF;
+ mutex_unlock(&aconnector->base.dev->mode_config.mutex);
+ drm_kms_helper_hotplug_event(aconnector->base.dev);
+
+ msleep(100);
+
+ mutex_lock(&aconnector->base.dev->mode_config.mutex);
+ aconnector->base.force = DRM_FORCE_UNSPECIFIED;
+ mutex_unlock(&aconnector->base.dev->mode_config.mutex);
+ drm_kms_helper_hotplug_event(aconnector->base.dev);
+
+ kfree(wr_buf);
+ return size;
+}
+
+/* function: get current DP PHY settings: voltage swing, pre-emphasis,
+ * post-cursor2 (defined by VESA DP specification)
+ *
+ * valid values
+ * voltage swing: 0,1,2,3
+ * pre-emphasis : 0,1,2,3
+ * post cursor2 : 0,1,2,3
+ *
+ *
+ * how to use this debugfs
+ *
+ * debugfs is located at /sys/kernel/debug/dri/0/DP-x
+ *
+ * there will be directories like DP-1, DP-2, DP-3, etc. for DP displays
+ *
+ * To figure out which DP-x corresponds to the display to be checked:
+ * cd DP-x
+ * ls -ll
+ * There should be debugfs files like link_settings and phy_settings.
+ * cat link_settings
+ * and use lane_count and link_rate to figure out which DP-x belongs to the
+ * display to be worked on
+ *
+ * To get the current DP PHY settings,
+ * cat phy_settings
+ *
+ * To change the DP PHY settings,
+ * echo <voltage_swing> <pre-emphasis> <post_cursor2> > phy_settings
+ * for example, to change voltage swing to 2, pre-emphasis to 3, post_cursor2 to
+ * 0,
+ * echo 2 3 0 > phy_settings
+ *
+ * To check whether the change was applied, get the current phy settings by
+ * cat phy_settings
+ *
+ * In case invalid values are set by the user, like
+ * echo 1 4 0 > phy_settings
+ *
+ * HW will NOT be programmed with these settings.
+ * cat phy_settings will show the previous valid settings. 
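+ *
+ * e.g. a minimal round trip (assuming the panel enumerates as DP-1):
+ * echo 2 3 0 > /sys/kernel/debug/dri/0/DP-1/phy_settings
+ * cat /sys/kernel/debug/dri/0/DP-1/phy_settings
+ * which should read back " 2 3 0" once the settings have been applied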
+ */ +static ssize_t dp_phy_settings_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *connector = file_inode(f)->i_private; + struct dc_link *link = connector->dc_link; + char *rd_buf = NULL; + const uint32_t rd_buf_size = 20; + uint32_t result = 0; + int r; + + if (*pos & 3 || size & 3) + return -EINVAL; + + rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); + if (!rd_buf) + return -EINVAL; + + snprintf(rd_buf, rd_buf_size, " %d %d %d\n", + link->cur_lane_setting[0].VOLTAGE_SWING, + link->cur_lane_setting[0].PRE_EMPHASIS, + link->cur_lane_setting[0].POST_CURSOR2); + + while (size) { + if (*pos >= rd_buf_size) + break; + + r = put_user((*(rd_buf + result)), buf); + if (r) { + kfree(rd_buf); + return r; /* r = -EFAULT */ + } + + buf += 1; + size -= 1; + *pos += 1; + result += 1; + } + + kfree(rd_buf); + return result; +} + +static int dp_lttpr_status_show(struct seq_file *m, void *unused) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = + to_amdgpu_dm_connector(connector); + struct dc_lttpr_caps caps = aconnector->dc_link->dpcd_caps.lttpr_caps; + + if (connector->status != connector_status_connected) + return -ENODEV; + + seq_printf(m, "phy repeater count: %u (raw: 0x%x)\n", + dp_parse_lttpr_repeater_count(caps.phy_repeater_cnt), + caps.phy_repeater_cnt); + + seq_puts(m, "phy repeater mode: "); + + switch (caps.mode) { + case DP_PHY_REPEATER_MODE_TRANSPARENT: + seq_puts(m, "transparent"); + break; + case DP_PHY_REPEATER_MODE_NON_TRANSPARENT: + seq_puts(m, "non-transparent"); + break; + case 0x00: + seq_puts(m, "non lttpr"); + break; + default: + seq_printf(m, "read error (raw: 0x%x)", caps.mode); + break; + } + + seq_puts(m, "\n"); + return 0; +} + +static ssize_t dp_phy_settings_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *connector = file_inode(f)->i_private; + struct dc_link *link = connector->dc_link; + struct dc *dc = (struct dc *)link->dc; + char *wr_buf = NULL; + uint32_t wr_buf_size = 40; + long param[3]; + bool use_prefer_link_setting; + struct link_training_settings link_lane_settings; + int max_param_num = 3; + uint8_t param_nums = 0; + int r = 0; + + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + if (!wr_buf) + return -ENOSPC; + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + max_param_num, + ¶m_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + kfree(wr_buf); + DRM_DEBUG_DRIVER("user data not be read\n"); + return -EINVAL; + } + + if ((param[0] > VOLTAGE_SWING_MAX_LEVEL) || + (param[1] > PRE_EMPHASIS_MAX_LEVEL) || + (param[2] > POST_CURSOR2_MAX_LEVEL)) { + kfree(wr_buf); + DRM_DEBUG_DRIVER("Invalid Input No HW will be programmed\n"); + return size; + } + + /* get link settings: lane count, link rate */ + use_prefer_link_setting = + ((link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN) && + (link->test_pattern_enabled)); + + memset(&link_lane_settings, 0, sizeof(link_lane_settings)); + + if (use_prefer_link_setting) { + link_lane_settings.link_settings.lane_count = + link->preferred_link_setting.lane_count; + link_lane_settings.link_settings.link_rate = + link->preferred_link_setting.link_rate; + link_lane_settings.link_settings.link_spread = + link->preferred_link_setting.link_spread; + } else { + link_lane_settings.link_settings.lane_count = + link->cur_link_settings.lane_count; + 
link_lane_settings.link_settings.link_rate =
+ link->cur_link_settings.link_rate;
+ link_lane_settings.link_settings.link_spread =
+ link->cur_link_settings.link_spread;
+ }
+
+ /* apply phy settings from user */
+ for (r = 0; r < link_lane_settings.link_settings.lane_count; r++) {
+ link_lane_settings.hw_lane_settings[r].VOLTAGE_SWING =
+ (enum dc_voltage_swing) (param[0]);
+ link_lane_settings.hw_lane_settings[r].PRE_EMPHASIS =
+ (enum dc_pre_emphasis) (param[1]);
+ link_lane_settings.hw_lane_settings[r].POST_CURSOR2 =
+ (enum dc_post_cursor2) (param[2]);
+ }
+
+ /* program ASIC registers and DPCD registers */
+ dc_link_set_drive_settings(dc, &link_lane_settings, link);
+
+ kfree(wr_buf);
+ return size;
+}
+
+/* function description
+ *
+ * set PHY layer or Link layer test pattern
+ * PHY test pattern is used for the PHY SI check.
+ * Link layer test will not affect PHY SI.
+ *
+ * Reset Test Pattern:
+ * 0 = DP_TEST_PATTERN_VIDEO_MODE
+ *
+ * PHY test patterns supported:
+ * 1 = DP_TEST_PATTERN_D102
+ * 2 = DP_TEST_PATTERN_SYMBOL_ERROR
+ * 3 = DP_TEST_PATTERN_PRBS7
+ * 4 = DP_TEST_PATTERN_80BIT_CUSTOM
+ * 5 = DP_TEST_PATTERN_CP2520_1
+ * 6 = DP_TEST_PATTERN_CP2520_2 = DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE
+ * 7 = DP_TEST_PATTERN_CP2520_3
+ *
+ * DP PHY Link Training Patterns
+ * 8 = DP_TEST_PATTERN_TRAINING_PATTERN1
+ * 9 = DP_TEST_PATTERN_TRAINING_PATTERN2
+ * a = DP_TEST_PATTERN_TRAINING_PATTERN3
+ * b = DP_TEST_PATTERN_TRAINING_PATTERN4
+ *
+ * DP Link Layer Test patterns
+ * c = DP_TEST_PATTERN_COLOR_SQUARES
+ * d = DP_TEST_PATTERN_COLOR_SQUARES_CEA
+ * e = DP_TEST_PATTERN_VERTICAL_BARS
+ * f = DP_TEST_PATTERN_HORIZONTAL_BARS
+ * 10= DP_TEST_PATTERN_COLOR_RAMP
+ *
+ * debugfs phy_test_pattern is located at /sys/kernel/debug/dri/0/DP-x
+ *
+ * --- set test pattern
+ * echo <test pattern #> > phy_test_pattern
+ *
+ * If the test pattern # is not supported, NO HW programming will be done.
+ * DP_TEST_PATTERN_80BIT_CUSTOM needs an extra 10 bytes of data for the user
+ * pattern; the 10 input data bytes are separated by spaces:
+ *
+ * echo 0x4 0x11 0x22 0x33 0x44 0x55 0x66 0x77 0x88 0x99 0xaa > phy_test_pattern
+ *
+ * --- reset test pattern
+ * echo 0 > phy_test_pattern
+ *
+ * --- HPD detection is disabled while a PHY test pattern is set
+ *
+ * when a PHY test pattern (pattern # within [1,7]) is set, the HPD pin of the
+ * HW ASIC is disabled. The user can unplug the DP display from the DP
+ * connector and plug in a scope to check the test pattern PHY SI.
+ * To unplug the scope and plug the DP display back in, do the steps below:
+ * echo 0 > phy_test_pattern
+ * unplug scope
+ * plug DP display. 
+ *
+ * "echo 0 > phy_test_pattern" will re-enable the HPD pin again so that the
+ * video sw driver can detect "unplug scope" and "plug DP display"
+ */
+static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+ struct dc_link *link = connector->dc_link;
+ char *wr_buf = NULL;
+ uint32_t wr_buf_size = 100;
+ long param[11] = {0x0};
+ int max_param_num = 11;
+ enum dp_test_pattern test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+ bool disable_hpd = false;
+ bool valid_test_pattern = false;
+ uint8_t param_nums = 0;
+ /* init with default 80bit custom pattern */
+ uint8_t custom_pattern[10] = {
+ 0x1f, 0x7c, 0xf0, 0xc1, 0x07,
+ 0x1f, 0x7c, 0xf0, 0xc1, 0x07
+ };
+ struct dc_link_settings prefer_link_settings = {LANE_COUNT_UNKNOWN,
+ LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
+ struct dc_link_settings cur_link_settings = {LANE_COUNT_UNKNOWN,
+ LINK_RATE_UNKNOWN, LINK_SPREAD_DISABLED};
+ struct link_training_settings link_training_settings;
+ int i;
+
+ if (size == 0)
+ return -EINVAL;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+ if (!wr_buf)
+ return -ENOSPC;
+
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ (long *)param, buf,
+ max_param_num,
+ &param_nums)) {
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ if (param_nums <= 0) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("user data not be read\n");
+ return -EINVAL;
+ }
+
+
+ test_pattern = param[0];
+
+ switch (test_pattern) {
+ case DP_TEST_PATTERN_VIDEO_MODE:
+ case DP_TEST_PATTERN_COLOR_SQUARES:
+ case DP_TEST_PATTERN_COLOR_SQUARES_CEA:
+ case DP_TEST_PATTERN_VERTICAL_BARS:
+ case DP_TEST_PATTERN_HORIZONTAL_BARS:
+ case DP_TEST_PATTERN_COLOR_RAMP:
+ valid_test_pattern = true;
+ break;
+
+ case DP_TEST_PATTERN_D102:
+ case DP_TEST_PATTERN_SYMBOL_ERROR:
+ case DP_TEST_PATTERN_PRBS7:
+ case DP_TEST_PATTERN_80BIT_CUSTOM:
+ case DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE:
+ case DP_TEST_PATTERN_TRAINING_PATTERN4:
+ disable_hpd = true;
+ valid_test_pattern = true;
+ break;
+
+ default:
+ valid_test_pattern = false;
+ test_pattern = DP_TEST_PATTERN_UNSUPPORTED;
+ break;
+ }
+
+ if (!valid_test_pattern) {
+ kfree(wr_buf);
+ DRM_DEBUG_DRIVER("Invalid Test Pattern Parameters\n");
+ return size;
+ }
+
+ if (test_pattern == DP_TEST_PATTERN_80BIT_CUSTOM) {
+ for (i = 0; i < 10; i++) {
+ if ((uint8_t) param[i + 1] != 0x0)
+ break;
+ }
+
+ if (i < 10) {
+ /* do not use the default value */
+ for (i = 0; i < 10; i++)
+ custom_pattern[i] = (uint8_t) param[i + 1];
+ }
+ }
+
+ /* Usage: set a DP physical test pattern using debugfs with a normal DP
+ * panel. Then plug out the DP panel and connect a scope to measure the
+ * signal.
+ * Normal video mode and the test patterns generated from the CRTC are
+ * visible to the user. So do not disable HPD.
+ * Video Mode is also set to clear the test pattern, so enable HPD
+ * because it might have been disabled after a test pattern was set.
+ * AUX depends on HPD; sequence dependent, do not move! 
+ */ + if (!disable_hpd) + dc_link_enable_hpd(link); + + prefer_link_settings.lane_count = link->verified_link_cap.lane_count; + prefer_link_settings.link_rate = link->verified_link_cap.link_rate; + prefer_link_settings.link_spread = link->verified_link_cap.link_spread; + + cur_link_settings.lane_count = link->cur_link_settings.lane_count; + cur_link_settings.link_rate = link->cur_link_settings.link_rate; + cur_link_settings.link_spread = link->cur_link_settings.link_spread; + + link_training_settings.link_settings = cur_link_settings; + + + if (test_pattern != DP_TEST_PATTERN_VIDEO_MODE) { + if (prefer_link_settings.lane_count != LANE_COUNT_UNKNOWN && + prefer_link_settings.link_rate != LINK_RATE_UNKNOWN && + (prefer_link_settings.lane_count != cur_link_settings.lane_count || + prefer_link_settings.link_rate != cur_link_settings.link_rate)) + link_training_settings.link_settings = prefer_link_settings; + } + + for (i = 0; i < (unsigned int)(link_training_settings.link_settings.lane_count); i++) + link_training_settings.hw_lane_settings[i] = link->cur_lane_setting[i]; + + dc_link_dp_set_test_pattern( + link, + test_pattern, + DP_TEST_PATTERN_COLOR_SPACE_RGB, + &link_training_settings, + custom_pattern, + 10); + + /* Usage: Set DP physical test pattern using AMDDP with normal DP panel + * Then plug out DP panel and connect a scope to measure DP PHY signal. + * Need disable interrupt to avoid SW driver disable DP output. This is + * done after the test pattern is set. + */ + if (valid_test_pattern && disable_hpd) + dc_link_disable_hpd(link); + + kfree(wr_buf); + + return size; +} + +/* + * Returns the DMCUB tracebuffer contents. + * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer + */ +static int dmub_tracebuffer_show(struct seq_file *m, void *data) +{ + struct amdgpu_device *adev = m->private; + struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; + struct dmub_debugfs_trace_entry *entries; + uint8_t *tbuf_base; + uint32_t tbuf_size, max_entries, num_entries, i; + + if (!fb_info) + return 0; + + tbuf_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr; + if (!tbuf_base) + return 0; + + tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size; + max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) / + sizeof(struct dmub_debugfs_trace_entry); + + num_entries = + ((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count; + + num_entries = min(num_entries, max_entries); + + entries = (struct dmub_debugfs_trace_entry + *)(tbuf_base + + sizeof(struct dmub_debugfs_trace_header)); + + for (i = 0; i < num_entries; ++i) { + struct dmub_debugfs_trace_entry *entry = &entries[i]; + + seq_printf(m, + "trace_code=%u tick_count=%u param0=%u param1=%u\n", + entry->trace_code, entry->tick_count, entry->param0, + entry->param1); + } + + return 0; +} + +/* + * Returns the DMCUB firmware state contents. 
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_fw_state + */ +static int dmub_fw_state_show(struct seq_file *m, void *data) +{ + struct amdgpu_device *adev = m->private; + struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info; + uint8_t *state_base; + uint32_t state_size; + + if (!fb_info) + return 0; + + state_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr; + if (!state_base) + return 0; + + state_size = fb_info->fb[DMUB_WINDOW_6_FW_STATE].size; + + return seq_write(m, state_base, state_size); +} + +/* psr_capability_show() - show eDP panel PSR capability + * + * The read function: sink_psr_capability_show + * Shows if sink has PSR capability or not. + * If yes - the PSR version is appended + * + * cat /sys/kernel/debug/dri/0/eDP-X/psr_capability + * + * Expected output: + * "Sink support: no\n" - if panel doesn't support PSR + * "Sink support: yes [0x01]\n" - if panel supports PSR1 + * "Driver support: no\n" - if driver doesn't support PSR + * "Driver support: yes [0x01]\n" - if driver supports PSR1 + */ +static int psr_capability_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct dc_link *link = aconnector->dc_link; + + if (!link) + return -ENODEV; + + if (link->type == dc_connection_none) + return -ENODEV; + + if (!(link->connector_signal & SIGNAL_TYPE_EDP)) + return -ENODEV; + + seq_printf(m, "Sink support: %s", str_yes_no(link->dpcd_caps.psr_info.psr_version != 0)); + if (link->dpcd_caps.psr_info.psr_version) + seq_printf(m, " [0x%02x]", link->dpcd_caps.psr_info.psr_version); + seq_puts(m, "\n"); + + seq_printf(m, "Driver support: %s", str_yes_no(link->psr_settings.psr_feature_enabled)); + if (link->psr_settings.psr_version) + seq_printf(m, " [0x%02x]", link->psr_settings.psr_version); + seq_puts(m, "\n"); + + return 0; +} + +/* + * Returns the current bpc for the crtc. + * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/amdgpu_current_bpc + */ +static int amdgpu_current_bpc_show(struct seq_file *m, void *data) +{ + struct drm_crtc *crtc = m->private; + struct drm_device *dev = crtc->dev; + struct dm_crtc_state *dm_crtc_state = NULL; + int res = -ENODEV; + unsigned int bpc; + + mutex_lock(&dev->mode_config.mutex); + drm_modeset_lock(&crtc->mutex, NULL); + if (crtc->state == NULL) + goto unlock; + + dm_crtc_state = to_dm_crtc_state(crtc->state); + if (dm_crtc_state->stream == NULL) + goto unlock; + + switch (dm_crtc_state->stream->timing.display_color_depth) { + case COLOR_DEPTH_666: + bpc = 6; + break; + case COLOR_DEPTH_888: + bpc = 8; + break; + case COLOR_DEPTH_101010: + bpc = 10; + break; + case COLOR_DEPTH_121212: + bpc = 12; + break; + case COLOR_DEPTH_161616: + bpc = 16; + break; + default: + goto unlock; + } + + seq_printf(m, "Current: %u\n", bpc); + res = 0; + +unlock: + drm_modeset_unlock(&crtc->mutex); + mutex_unlock(&dev->mode_config.mutex); + + return res; +} +DEFINE_SHOW_ATTRIBUTE(amdgpu_current_bpc); + +/* + * Returns the current colorspace for the crtc. 
+ * Example usage: cat /sys/kernel/debug/dri/0/crtc-0/amdgpu_current_colorspace
+ */
+static int amdgpu_current_colorspace_show(struct seq_file *m, void *data)
+{
+ struct drm_crtc *crtc = m->private;
+ struct drm_device *dev = crtc->dev;
+ struct dm_crtc_state *dm_crtc_state = NULL;
+ int res = -ENODEV;
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_modeset_lock(&crtc->mutex, NULL);
+ if (crtc->state == NULL)
+ goto unlock;
+
+ dm_crtc_state = to_dm_crtc_state(crtc->state);
+ if (dm_crtc_state->stream == NULL)
+ goto unlock;
+
+ switch (dm_crtc_state->stream->output_color_space) {
+ case COLOR_SPACE_SRGB:
+ seq_puts(m, "sRGB");
+ break;
+ case COLOR_SPACE_YCBCR601:
+ case COLOR_SPACE_YCBCR601_LIMITED:
+ seq_puts(m, "BT601_YCC");
+ break;
+ case COLOR_SPACE_YCBCR709:
+ case COLOR_SPACE_YCBCR709_LIMITED:
+ seq_puts(m, "BT709_YCC");
+ break;
+ case COLOR_SPACE_ADOBERGB:
+ seq_puts(m, "opRGB");
+ break;
+ case COLOR_SPACE_2020_RGB_FULLRANGE:
+ seq_puts(m, "BT2020_RGB");
+ break;
+ case COLOR_SPACE_2020_YCBCR:
+ seq_puts(m, "BT2020_YCC");
+ break;
+ default:
+ goto unlock;
+ }
+ res = 0;
+
+unlock:
+ drm_modeset_unlock(&crtc->mutex);
+ mutex_unlock(&dev->mode_config.mutex);
+
+ return res;
+}
+DEFINE_SHOW_ATTRIBUTE(amdgpu_current_colorspace);
+
+
+/*
+ * Example usage:
+ * Disable dsc passthrough, i.e. have dsc decoding at the converter, not the external RX:
+ * echo 1 > /sys/kernel/debug/dri/0/DP-1/dsc_disable_passthrough
+ * Enable dsc passthrough, i.e. have dsc pass through to the external RX:
+ * echo 0 > /sys/kernel/debug/dri/0/DP-1/dsc_disable_passthrough
+ */
+static ssize_t dp_dsc_passthrough_set(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+ char *wr_buf = NULL;
+ uint32_t wr_buf_size = 42;
+ int max_param_num = 1;
+ long param;
+ uint8_t param_nums = 0;
+
+ if (size == 0)
+ return -EINVAL;
+
+ wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+
+ if (!wr_buf) {
+ DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
+ return -ENOSPC;
+ }
+
+ if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+ &param, buf,
+ max_param_num,
+ &param_nums)) {
+ kfree(wr_buf);
+ return -EINVAL;
+ }
+
+ aconnector->dsc_settings.dsc_force_disable_passthrough = param;
+
+ kfree(wr_buf);
+ return 0;
+}
+
+/*
+ * Returns the HDCP capability of the Display (1.4 for now).
+ *
+ * NOTE: Not all HDMI displays report their HDCP caps even when they are capable.
+ * Since it's rare for a display to not be HDCP 1.4 capable, we set HDMI as always capable. 
+ * + * Example usage: cat /sys/kernel/debug/dri/0/DP-1/hdcp_sink_capability + * or cat /sys/kernel/debug/dri/0/HDMI-A-1/hdcp_sink_capability + */ +static int hdcp_sink_capability_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + bool hdcp_cap, hdcp2_cap; + + if (connector->status != connector_status_connected) + return -ENODEV; + + seq_printf(m, "%s:%d HDCP version: ", connector->name, connector->base.id); + + hdcp_cap = dc_link_is_hdcp14(aconnector->dc_link, aconnector->dc_sink->sink_signal); + hdcp2_cap = dc_link_is_hdcp22(aconnector->dc_link, aconnector->dc_sink->sink_signal); + + + if (hdcp_cap) + seq_printf(m, "%s ", "HDCP1.4"); + if (hdcp2_cap) + seq_printf(m, "%s ", "HDCP2.2"); + + if (!hdcp_cap && !hdcp2_cap) + seq_printf(m, "%s ", "None"); + + seq_puts(m, "\n"); + + return 0; +} + +/* + * Returns whether the connected display is internal and not hotpluggable. + * Example usage: cat /sys/kernel/debug/dri/0/DP-1/internal_display + */ +static int internal_display_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct dc_link *link = aconnector->dc_link; + + seq_printf(m, "Internal: %u\n", link->is_internal_display); + + return 0; +} + +/* function description + * + * generic SDP message access for testing + * + * debugfs sdp_message is located at /syskernel/debug/dri/0/DP-x + * + * SDP header + * Hb0 : Secondary-Data Packet ID + * Hb1 : Secondary-Data Packet type + * Hb2 : Secondary-Data-packet-specific header, Byte 0 + * Hb3 : Secondary-Data-packet-specific header, Byte 1 + * + * for using custom sdp message: input 4 bytes SDP header and 32 bytes raw data + */ +static ssize_t dp_sdp_message_debugfs_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + int r; + uint8_t data[36]; + struct amdgpu_dm_connector *connector = file_inode(f)->i_private; + struct dm_crtc_state *acrtc_state; + uint32_t write_size = 36; + + if (connector->base.status != connector_status_connected) + return -ENODEV; + + if (size == 0) + return 0; + + acrtc_state = to_dm_crtc_state(connector->base.state->crtc->state); + + r = copy_from_user(data, buf, write_size); + + write_size -= r; + + dc_stream_send_dp_sdp(acrtc_state->stream, data, write_size); + + return write_size; +} + +/* function: Read link's DSC & FEC capabilities + * + * + * Access it with the following command (you need to specify + * connector like DP-1): + * + * cat /sys/kernel/debug/dri/0/DP-X/dp_dsc_fec_support + * + */ +static int dp_dsc_fec_support_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct drm_modeset_acquire_ctx ctx; + struct drm_device *dev = connector->dev; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + int ret = 0; + bool try_again = false; + bool is_fec_supported = false; + bool is_dsc_supported = false; + struct dpcd_caps dpcd_caps; + + drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE); + do { + try_again = false; + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); + if (ret) { + if (ret == -EDEADLK) { + ret = drm_modeset_backoff(&ctx); + if (!ret) { + try_again = true; + continue; + } + } + break; + } + if (connector->status != connector_status_connected) { + ret = -ENODEV; + break; + } + dpcd_caps = aconnector->dc_link->dpcd_caps; + if 
(aconnector->mst_output_port) { + /* aconnector sets dsc_aux during get_modes call + * if MST connector has it means it can either + * enable DSC on the sink device or on MST branch + * its connected to. + */ + if (aconnector->dsc_aux) { + is_fec_supported = true; + is_dsc_supported = true; + } + } else { + is_fec_supported = dpcd_caps.fec_cap.raw & 0x1; + is_dsc_supported = dpcd_caps.dsc_caps.dsc_basic_caps.raw[0] & 0x1; + } + } while (try_again); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + seq_printf(m, "FEC_Sink_Support: %s\n", str_yes_no(is_fec_supported)); + seq_printf(m, "DSC_Sink_Support: %s\n", str_yes_no(is_dsc_supported)); + + return ret; +} + +/* function: Trigger virtual HPD redetection on connector + * + * This function will perform link rediscovery, link disable + * and enable, and dm connector state update. + * + * Retrigger HPD on an existing connector by echoing 1 into + * its respectful "trigger_hotplug" debugfs entry: + * + * echo 1 > /sys/kernel/debug/dri/0/DP-X/trigger_hotplug + * + * This function can perform HPD unplug: + * + * echo 0 > /sys/kernel/debug/dri/0/DP-X/trigger_hotplug + * + */ +static ssize_t trigger_hotplug(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct drm_connector *connector = &aconnector->base; + struct dc_link *link = NULL; + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + enum dc_connection_type new_connection_type = dc_connection_none; + char *wr_buf = NULL; + uint32_t wr_buf_size = 42; + int max_param_num = 1; + long param[1] = {0}; + uint8_t param_nums = 0; + bool ret = false; + + if (!aconnector || !aconnector->dc_link) + return -EINVAL; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + + if (!wr_buf) { + DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); + return -ENOSPC; + } + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + max_param_num, + ¶m_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + kfree(wr_buf); + + if (param_nums <= 0) { + DRM_DEBUG_DRIVER("user data not be read\n"); + return -EINVAL; + } + + mutex_lock(&aconnector->hpd_lock); + + /* Don't support for mst end device*/ + if (aconnector->mst_root) { + mutex_unlock(&aconnector->hpd_lock); + return -EINVAL; + } + + if (param[0] == 1) { + + if (!dc_link_detect_connection_type(aconnector->dc_link, &new_connection_type) && + new_connection_type != dc_connection_none) + goto unlock; + + mutex_lock(&adev->dm.dc_lock); + ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD); + mutex_unlock(&adev->dm.dc_lock); + + if (!ret) + goto unlock; + + amdgpu_dm_update_connector_after_detect(aconnector); + + drm_modeset_lock_all(dev); + dm_restore_drm_connector_state(dev, connector); + drm_modeset_unlock_all(dev); + + drm_kms_helper_connector_hotplug_event(connector); + } else if (param[0] == 0) { + if (!aconnector->dc_link) + goto unlock; + + link = aconnector->dc_link; + + if (link->local_sink) { + dc_sink_release(link->local_sink); + link->local_sink = NULL; + } + + link->dpcd_sink_count = 0; + link->type = dc_connection_none; + link->dongle_max_pix_clk = 0; + + amdgpu_dm_update_connector_after_detect(aconnector); + + /* If the aconnector is the root node in mst topology */ + if (aconnector->mst_mgr.mst_state == true) + dc_link_reset_cur_dp_mst_topology(link); + + drm_modeset_lock_all(dev); + dm_restore_drm_connector_state(dev, 
connector); + drm_modeset_unlock_all(dev); + + drm_kms_helper_connector_hotplug_event(connector); + } + +unlock: + mutex_unlock(&aconnector->hpd_lock); + + return size; +} + +/* function: read DSC status on the connector + * + * The read function: dp_dsc_clock_en_read + * returns current status of DSC clock on the connector. + * The return is a boolean flag: 1 or 0. + * + * Access it with the following command (you need to specify + * connector like DP-1): + * + * cat /sys/kernel/debug/dri/0/DP-X/dsc_clock_en + * + * Expected output: + * 1 - means that DSC is currently enabled + * 0 - means that DSC is disabled + */ +static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + char *rd_buf = NULL; + char *rd_buf_ptr = NULL; + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct display_stream_compressor *dsc; + struct dcn_dsc_state dsc_state = {0}; + const uint32_t rd_buf_size = 10; + struct pipe_ctx *pipe_ctx; + ssize_t result = 0; + int i, r, str_len = 30; + + rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); + + if (!rd_buf) + return -ENOMEM; + + rd_buf_ptr = rd_buf; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; + } + + dsc = pipe_ctx->stream_res.dsc; + if (dsc) + dsc->funcs->dsc_read_state(dsc, &dsc_state); + + snprintf(rd_buf_ptr, str_len, + "%d\n", + dsc_state.dsc_clock_en); + rd_buf_ptr += str_len; + + while (size) { + if (*pos >= rd_buf_size) + break; + + r = put_user(*(rd_buf + result), buf); + if (r) { + kfree(rd_buf); + return r; /* r = -EFAULT */ + } + + buf += 1; + size -= 1; + *pos += 1; + result += 1; + } + + kfree(rd_buf); + return result; +} + +/* function: write force DSC on the connector + * + * The write function: dp_dsc_clock_en_write + * enables to force DSC on the connector. 
+ * User can write to either force enable or force disable DSC + * on the next modeset or set it to driver default + * + * Accepted inputs: + * 0 - default DSC enablement policy + * 1 - force enable DSC on the connector + * 2 - force disable DSC on the connector (might cause fail in atomic_check) + * + * Writing DSC settings is done with the following command: + * - To force enable DSC (you need to specify + * connector like DP-1): + * + * echo 0x1 > /sys/kernel/debug/dri/0/DP-X/dsc_clock_en + * + * - To return to default state set the flag to zero and + * let driver deal with DSC automatically + * (you need to specify connector like DP-1): + * + * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_clock_en + * + */ +static ssize_t dp_dsc_clock_en_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct drm_crtc *crtc = NULL; + struct dm_crtc_state *dm_crtc_state = NULL; + struct pipe_ctx *pipe_ctx; + int i; + char *wr_buf = NULL; + uint32_t wr_buf_size = 42; + int max_param_num = 1; + long param[1] = {0}; + uint8_t param_nums = 0; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + + if (!wr_buf) { + DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); + return -ENOSPC; + } + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + max_param_num, + ¶m_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + DRM_DEBUG_DRIVER("user data not be read\n"); + kfree(wr_buf); + return -EINVAL; + } + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; + } + + if (!pipe_ctx->stream) + goto done; + + // Get CRTC state + mutex_lock(&dev->mode_config.mutex); + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + + if (connector->state == NULL) + goto unlock; + + crtc = connector->state->crtc; + if (crtc == NULL) + goto unlock; + + drm_modeset_lock(&crtc->mutex, NULL); + if (crtc->state == NULL) + goto unlock; + + dm_crtc_state = to_dm_crtc_state(crtc->state); + if (dm_crtc_state->stream == NULL) + goto unlock; + + if (param[0] == 1) + aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_ENABLE; + else if (param[0] == 2) + aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DISABLE; + else + aconnector->dsc_settings.dsc_force_enable = DSC_CLK_FORCE_DEFAULT; + + dm_crtc_state->dsc_force_changed = true; + +unlock: + if (crtc) + drm_modeset_unlock(&crtc->mutex); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + mutex_unlock(&dev->mode_config.mutex); + +done: + kfree(wr_buf); + return size; +} + +/* function: read DSC slice width parameter on the connector + * + * The read function: dp_dsc_slice_width_read + * returns dsc slice width used in the current configuration + * The return is an integer: 0 or other positive number + * + * Access the status with the following command: + * + * cat /sys/kernel/debug/dri/0/DP-X/dsc_slice_width + * + * 0 - means that DSC is disabled + * + * Any other number more than zero represents the + * slice width currently used by DSC in pixels + * + */ +static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + char *rd_buf = NULL; + char *rd_buf_ptr = NULL; + struct 
amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct display_stream_compressor *dsc; + struct dcn_dsc_state dsc_state = {0}; + const uint32_t rd_buf_size = 100; + struct pipe_ctx *pipe_ctx; + ssize_t result = 0; + int i, r, str_len = 30; + + rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); + + if (!rd_buf) + return -ENOMEM; + + rd_buf_ptr = rd_buf; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; + } + + dsc = pipe_ctx->stream_res.dsc; + if (dsc) + dsc->funcs->dsc_read_state(dsc, &dsc_state); + + snprintf(rd_buf_ptr, str_len, + "%d\n", + dsc_state.dsc_slice_width); + rd_buf_ptr += str_len; + + while (size) { + if (*pos >= rd_buf_size) + break; + + r = put_user(*(rd_buf + result), buf); + if (r) { + kfree(rd_buf); + return r; /* r = -EFAULT */ + } + + buf += 1; + size -= 1; + *pos += 1; + result += 1; + } + + kfree(rd_buf); + return result; +} + +/* function: write DSC slice width parameter + * + * The write function: dp_dsc_slice_width_write + * overwrites automatically generated DSC configuration + * of slice width. + * + * The user has to write the slice width divisible by the + * picture width. + * + * Also the user has to write width in hexidecimal + * rather than in decimal. + * + * Writing DSC settings is done with the following command: + * - To force overwrite slice width: (example sets to 1920 pixels) + * + * echo 0x780 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_width + * + * - To stop overwriting and let driver find the optimal size, + * set the width to zero: + * + * echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_width + * + */ +static ssize_t dp_dsc_slice_width_write(struct file *f, const char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct pipe_ctx *pipe_ctx; + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct drm_crtc *crtc = NULL; + struct dm_crtc_state *dm_crtc_state = NULL; + int i; + char *wr_buf = NULL; + uint32_t wr_buf_size = 42; + int max_param_num = 1; + long param[1] = {0}; + uint8_t param_nums = 0; + + if (size == 0) + return -EINVAL; + + wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL); + + if (!wr_buf) { + DRM_DEBUG_DRIVER("no memory to allocate write buffer\n"); + return -ENOSPC; + } + + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, + (long *)param, buf, + max_param_num, + ¶m_nums)) { + kfree(wr_buf); + return -EINVAL; + } + + if (param_nums <= 0) { + DRM_DEBUG_DRIVER("user data not be read\n"); + kfree(wr_buf); + return -EINVAL; + } + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; + } + + if (!pipe_ctx->stream) + goto done; + + // Safely get CRTC state + mutex_lock(&dev->mode_config.mutex); + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + + if (connector->state == NULL) + goto unlock; + + crtc = connector->state->crtc; + if (crtc == NULL) + goto unlock; + + drm_modeset_lock(&crtc->mutex, NULL); + if (crtc->state == NULL) + goto unlock; + + dm_crtc_state = to_dm_crtc_state(crtc->state); + if (dm_crtc_state->stream == NULL) + goto unlock; + + if (param[0] > 0) + aconnector->dsc_settings.dsc_num_slices_h = DIV_ROUND_UP( + pipe_ctx->stream->timing.h_addressable, + param[0]); + else + 
aconnector->dsc_settings.dsc_num_slices_h = 0; + + dm_crtc_state->dsc_force_changed = true; + +unlock: + if (crtc) + drm_modeset_unlock(&crtc->mutex); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + mutex_unlock(&dev->mode_config.mutex); + +done: + kfree(wr_buf); + return size; +} + +/* function: read DSC slice height parameter on the connector + * + * The read function: dp_dsc_slice_height_read + * returns dsc slice height used in the current configuration + * The return is an integer: 0 or other positive number + * + * Access the status with the following command: + * + * cat /sys/kernel/debug/dri/0/DP-X/dsc_slice_height + * + * 0 - means that DSC is disabled + * + * Any other number more than zero represents the + * slice height currently used by DSC in pixels + * + */ +static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + char *rd_buf = NULL; + char *rd_buf_ptr = NULL; + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct display_stream_compressor *dsc; + struct dcn_dsc_state dsc_state = {0}; + const uint32_t rd_buf_size = 100; + struct pipe_ctx *pipe_ctx; + ssize_t result = 0; + int i, r, str_len = 30; + + rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); + + if (!rd_buf) + return -ENOMEM; + + rd_buf_ptr = rd_buf; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; + } + + dsc = pipe_ctx->stream_res.dsc; + if (dsc) + dsc->funcs->dsc_read_state(dsc, &dsc_state); + + snprintf(rd_buf_ptr, str_len, + "%d\n", + dsc_state.dsc_slice_height); + rd_buf_ptr += str_len; + + while (size) { + if (*pos >= rd_buf_size) + break; + + r = put_user(*(rd_buf + result), buf); + if (r) { + kfree(rd_buf); + return r; /* r = -EFAULT */ + } + + buf += 1; + size -= 1; + *pos += 1; + result += 1; + } + + kfree(rd_buf); + return result; +} + +/* function: write DSC slice height parameter + * + * The write function: dp_dsc_slice_height_write + * overwrites automatically generated DSC configuration + * of slice height. + * + * The user has to write the slice height divisible by the + * picture height. + * + * Also the user has to write height in hexidecimal + * rather than in decimal. 
+ *
+ * Writing DSC settings is done with the following command:
+ * - To force overwrite slice height (example sets to 128 pixels):
+ *
+ *    echo 0x80 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_height
+ *
+ * - To stop overwriting and let the driver find the optimal size,
+ *   set the height to zero:
+ *
+ *    echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_slice_height
+ *
+ */
+static ssize_t dp_dsc_slice_height_write(struct file *f, const char __user *buf,
+                                         size_t size, loff_t *pos)
+{
+    struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+    struct drm_connector *connector = &aconnector->base;
+    struct drm_device *dev = connector->dev;
+    struct drm_crtc *crtc = NULL;
+    struct dm_crtc_state *dm_crtc_state = NULL;
+    struct pipe_ctx *pipe_ctx;
+    int i;
+    char *wr_buf = NULL;
+    uint32_t wr_buf_size = 42;
+    int max_param_num = 1;
+    uint8_t param_nums = 0;
+    long param[1] = {0};
+
+    if (size == 0)
+        return -EINVAL;
+
+    wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+
+    if (!wr_buf) {
+        DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
+        return -ENOSPC;
+    }
+
+    if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+                                       (long *)param, buf,
+                                       max_param_num,
+                                       &param_nums)) {
+        kfree(wr_buf);
+        return -EINVAL;
+    }
+
+    if (param_nums <= 0) {
+        DRM_DEBUG_DRIVER("user data could not be read\n");
+        kfree(wr_buf);
+        return -EINVAL;
+    }
+
+    for (i = 0; i < MAX_PIPES; i++) {
+        pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+        if (pipe_ctx->stream &&
+            pipe_ctx->stream->link == aconnector->dc_link)
+            break;
+    }
+
+    if (!pipe_ctx->stream)
+        goto done;
+
+    // Get CRTC state
+    mutex_lock(&dev->mode_config.mutex);
+    drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+    if (connector->state == NULL)
+        goto unlock;
+
+    crtc = connector->state->crtc;
+    if (crtc == NULL)
+        goto unlock;
+
+    drm_modeset_lock(&crtc->mutex, NULL);
+    if (crtc->state == NULL)
+        goto unlock;
+
+    dm_crtc_state = to_dm_crtc_state(crtc->state);
+    if (dm_crtc_state->stream == NULL)
+        goto unlock;
+
+    if (param[0] > 0)
+        aconnector->dsc_settings.dsc_num_slices_v = DIV_ROUND_UP(
+                    pipe_ctx->stream->timing.v_addressable,
+                    param[0]);
+    else
+        aconnector->dsc_settings.dsc_num_slices_v = 0;
+
+    dm_crtc_state->dsc_force_changed = true;
+
+unlock:
+    if (crtc)
+        drm_modeset_unlock(&crtc->mutex);
+    drm_modeset_unlock(&dev->mode_config.connection_mutex);
+    mutex_unlock(&dev->mode_config.mutex);
+
+done:
+    kfree(wr_buf);
+    return size;
+}
+
+/* function: read DSC target rate on the connector in bits per pixel
+ *
+ * The read function: dp_dsc_bits_per_pixel_read
+ * returns the target compression rate in bits per pixel
+ * The return is an integer: 0 or another positive integer
+ *
+ * Access it with the following command:
+ *
+ *    cat /sys/kernel/debug/dri/0/DP-X/dsc_bits_per_pixel
+ *
+ * 0 - means that DSC is disabled
+ */
+static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf,
+                                          size_t size, loff_t *pos)
+{
+    char *rd_buf = NULL;
+    char *rd_buf_ptr = NULL;
+    struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+    struct display_stream_compressor *dsc;
+    struct dcn_dsc_state dsc_state = {0};
+    const uint32_t rd_buf_size = 100;
+    struct pipe_ctx *pipe_ctx;
+    ssize_t result = 0;
+    int i, r, str_len = 30;
+
+    rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+    if (!rd_buf)
+        return -ENOMEM;
+
+    rd_buf_ptr = rd_buf;
+
+    for (i = 0; i < MAX_PIPES; i++) {
+        pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+        if (pipe_ctx->stream &&
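+            /* looking for the pipe whose stream feeds this link */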
+            pipe_ctx->stream->link == aconnector->dc_link)
+            break;
+    }
+
+    dsc = pipe_ctx->stream_res.dsc;
+    if (dsc)
+        dsc->funcs->dsc_read_state(dsc, &dsc_state);
+
+    snprintf(rd_buf_ptr, str_len,
+             "%d\n",
+             dsc_state.dsc_bits_per_pixel);
+    rd_buf_ptr += str_len;
+
+    while (size) {
+        if (*pos >= rd_buf_size)
+            break;
+
+        r = put_user(*(rd_buf + result), buf);
+        if (r) {
+            kfree(rd_buf);
+            return r; /* r = -EFAULT */
+        }
+
+        buf += 1;
+        size -= 1;
+        *pos += 1;
+        result += 1;
+    }
+
+    kfree(rd_buf);
+    return result;
+}
+
+/* function: write DSC target rate in bits per pixel
+ *
+ * The write function: dp_dsc_bits_per_pixel_write
+ * overwrites the automatically generated DSC configuration
+ * of the DSC target bit rate.
+ *
+ * The bpp value must be written in hexadecimal
+ * rather than in decimal.
+ *
+ * Writing DSC settings is done with the following command:
+ * - To force overwrite the rate (example sets 0x100 = 256,
+ *   i.e. 256 x 1/16 = 16 bpp):
+ *
+ *    echo 0x100 > /sys/kernel/debug/dri/0/DP-X/dsc_bits_per_pixel
+ *
+ * - To stop overwriting and let the driver find the optimal rate,
+ *   set the rate to zero:
+ *
+ *    echo 0x0 > /sys/kernel/debug/dri/0/DP-X/dsc_bits_per_pixel
+ *
+ */
+static ssize_t dp_dsc_bits_per_pixel_write(struct file *f, const char __user *buf,
+                                           size_t size, loff_t *pos)
+{
+    struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+    struct drm_connector *connector = &aconnector->base;
+    struct drm_device *dev = connector->dev;
+    struct drm_crtc *crtc = NULL;
+    struct dm_crtc_state *dm_crtc_state = NULL;
+    struct pipe_ctx *pipe_ctx;
+    int i;
+    char *wr_buf = NULL;
+    uint32_t wr_buf_size = 42;
+    int max_param_num = 1;
+    uint8_t param_nums = 0;
+    long param[1] = {0};
+
+    if (size == 0)
+        return -EINVAL;
+
+    wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+
+    if (!wr_buf) {
+        DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
+        return -ENOSPC;
+    }
+
+    if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+                                       (long *)param, buf,
+                                       max_param_num,
+                                       &param_nums)) {
+        kfree(wr_buf);
+        return -EINVAL;
+    }
+
+    if (param_nums <= 0) {
+        DRM_DEBUG_DRIVER("user data could not be read\n");
+        kfree(wr_buf);
+        return -EINVAL;
+    }
+
+    for (i = 0; i < MAX_PIPES; i++) {
+        pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i];
+        if (pipe_ctx->stream &&
+            pipe_ctx->stream->link == aconnector->dc_link)
+            break;
+    }
+
+    if (!pipe_ctx->stream)
+        goto done;
+
+    // Get CRTC state
+    mutex_lock(&dev->mode_config.mutex);
+    drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+    if (connector->state == NULL)
+        goto unlock;
+
+    crtc = connector->state->crtc;
+    if (crtc == NULL)
+        goto unlock;
+
+    drm_modeset_lock(&crtc->mutex, NULL);
+    if (crtc->state == NULL)
+        goto unlock;
+
+    dm_crtc_state = to_dm_crtc_state(crtc->state);
+    if (dm_crtc_state->stream == NULL)
+        goto unlock;
+
+    aconnector->dsc_settings.dsc_bits_per_pixel = param[0];
+
+    dm_crtc_state->dsc_force_changed = true;
+
+unlock:
+    if (crtc)
+        drm_modeset_unlock(&crtc->mutex);
+    drm_modeset_unlock(&dev->mode_config.connection_mutex);
+    mutex_unlock(&dev->mode_config.mutex);
+
+done:
+    kfree(wr_buf);
+    return size;
+}
+
+/* function: read DSC picture width parameter on the connector
+ *
+ * The read function: dp_dsc_pic_width_read
+ * returns the dsc picture width used in the current configuration
+ * It is the same as h_addressable of the current
+ * display's timing
+ * The return is an integer: 0 or another positive integer
+ * If 0 then DSC is disabled.
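+ * The value is read back from the DSC hardware state, so it
+ * reflects what the encoder is actually programmed with.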
+ * + * Access it with the following command: + * + * cat /sys/kernel/debug/dri/0/DP-X/dsc_pic_width + * + * 0 - means that DSC is disabled + */ +static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + char *rd_buf = NULL; + char *rd_buf_ptr = NULL; + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct display_stream_compressor *dsc; + struct dcn_dsc_state dsc_state = {0}; + const uint32_t rd_buf_size = 100; + struct pipe_ctx *pipe_ctx; + ssize_t result = 0; + int i, r, str_len = 30; + + rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); + + if (!rd_buf) + return -ENOMEM; + + rd_buf_ptr = rd_buf; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; + } + + dsc = pipe_ctx->stream_res.dsc; + if (dsc) + dsc->funcs->dsc_read_state(dsc, &dsc_state); + + snprintf(rd_buf_ptr, str_len, + "%d\n", + dsc_state.dsc_pic_width); + rd_buf_ptr += str_len; + + while (size) { + if (*pos >= rd_buf_size) + break; + + r = put_user(*(rd_buf + result), buf); + if (r) { + kfree(rd_buf); + return r; /* r = -EFAULT */ + } + + buf += 1; + size -= 1; + *pos += 1; + result += 1; + } + + kfree(rd_buf); + return result; +} + +static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + char *rd_buf = NULL; + char *rd_buf_ptr = NULL; + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct display_stream_compressor *dsc; + struct dcn_dsc_state dsc_state = {0}; + const uint32_t rd_buf_size = 100; + struct pipe_ctx *pipe_ctx; + ssize_t result = 0; + int i, r, str_len = 30; + + rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); + + if (!rd_buf) + return -ENOMEM; + + rd_buf_ptr = rd_buf; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; + } + + dsc = pipe_ctx->stream_res.dsc; + if (dsc) + dsc->funcs->dsc_read_state(dsc, &dsc_state); + + snprintf(rd_buf_ptr, str_len, + "%d\n", + dsc_state.dsc_pic_height); + rd_buf_ptr += str_len; + + while (size) { + if (*pos >= rd_buf_size) + break; + + r = put_user(*(rd_buf + result), buf); + if (r) { + kfree(rd_buf); + return r; /* r = -EFAULT */ + } + + buf += 1; + size -= 1; + *pos += 1; + result += 1; + } + + kfree(rd_buf); + return result; +} + +/* function: read DSC chunk size parameter on the connector + * + * The read function: dp_dsc_chunk_size_read + * returns dsc chunk size set in the current configuration + * The value is calculated automatically by DSC code + * and depends on slice parameters and bpp target rate + * The return is an integer: 0 or other positive integer + * If 0 then DSC is disabled. 
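+ * (Per the DSC spec a chunk is one compressed slice line, so the
+ * chunk size is roughly slice_width * bpp / 8 bytes, rounded up.)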
+ * + * Access it with the following command: + * + * cat /sys/kernel/debug/dri/0/DP-X/dsc_chunk_size + * + * 0 - means that DSC is disabled + */ +static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + char *rd_buf = NULL; + char *rd_buf_ptr = NULL; + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct display_stream_compressor *dsc; + struct dcn_dsc_state dsc_state = {0}; + const uint32_t rd_buf_size = 100; + struct pipe_ctx *pipe_ctx; + ssize_t result = 0; + int i, r, str_len = 30; + + rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); + + if (!rd_buf) + return -ENOMEM; + + rd_buf_ptr = rd_buf; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; + } + + dsc = pipe_ctx->stream_res.dsc; + if (dsc) + dsc->funcs->dsc_read_state(dsc, &dsc_state); + + snprintf(rd_buf_ptr, str_len, + "%d\n", + dsc_state.dsc_chunk_size); + rd_buf_ptr += str_len; + + while (size) { + if (*pos >= rd_buf_size) + break; + + r = put_user(*(rd_buf + result), buf); + if (r) { + kfree(rd_buf); + return r; /* r = -EFAULT */ + } + + buf += 1; + size -= 1; + *pos += 1; + result += 1; + } + + kfree(rd_buf); + return result; +} + +/* function: read DSC slice bpg offset on the connector + * + * The read function: dp_dsc_slice_bpg_offset_read + * returns dsc bpg slice offset set in the current configuration + * The value is calculated automatically by DSC code + * and depends on slice parameters and bpp target rate + * The return is an integer: 0 or other positive integer + * If 0 then DSC is disabled. + * + * Access it with the following command: + * + * cat /sys/kernel/debug/dri/0/DP-X/dsc_slice_bpg_offset + * + * 0 - means that DSC is disabled + */ +static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + char *rd_buf = NULL; + char *rd_buf_ptr = NULL; + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct display_stream_compressor *dsc; + struct dcn_dsc_state dsc_state = {0}; + const uint32_t rd_buf_size = 100; + struct pipe_ctx *pipe_ctx; + ssize_t result = 0; + int i, r, str_len = 30; + + rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); + + if (!rd_buf) + return -ENOMEM; + + rd_buf_ptr = rd_buf; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &aconnector->dc_link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx->stream && + pipe_ctx->stream->link == aconnector->dc_link) + break; + } + + dsc = pipe_ctx->stream_res.dsc; + if (dsc) + dsc->funcs->dsc_read_state(dsc, &dsc_state); + + snprintf(rd_buf_ptr, str_len, + "%d\n", + dsc_state.dsc_slice_bpg_offset); + rd_buf_ptr += str_len; + + while (size) { + if (*pos >= rd_buf_size) + break; + + r = put_user(*(rd_buf + result), buf); + if (r) { + kfree(rd_buf); + return r; /* r = -EFAULT */ + } + + buf += 1; + size -= 1; + *pos += 1; + result += 1; + } + + kfree(rd_buf); + return result; +} + + +/* + * function description: Read max_requested_bpc property from the connector + * + * Access it with the following command: + * + * cat /sys/kernel/debug/dri/0/DP-X/max_bpc + * + */ +static ssize_t dp_max_bpc_read(struct file *f, char __user *buf, + size_t size, loff_t *pos) +{ + struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private; + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct dm_connector_state 
*state;
+    ssize_t result = 0;
+    char *rd_buf = NULL;
+    char *rd_buf_ptr = NULL;
+    const uint32_t rd_buf_size = 10;
+    int r;
+
+    rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
+
+    if (!rd_buf)
+        return -ENOMEM;
+
+    mutex_lock(&dev->mode_config.mutex);
+    drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+    if (connector->state == NULL)
+        goto unlock;
+
+    state = to_dm_connector_state(connector->state);
+
+    rd_buf_ptr = rd_buf;
+    snprintf(rd_buf_ptr, rd_buf_size,
+             "%u\n",
+             state->base.max_requested_bpc);
+
+    while (size) {
+        if (*pos >= rd_buf_size)
+            break;
+
+        r = put_user(*(rd_buf + result), buf);
+        if (r) {
+            result = r; /* r = -EFAULT */
+            goto unlock;
+        }
+        buf += 1;
+        size -= 1;
+        *pos += 1;
+        result += 1;
+    }
+unlock:
+    drm_modeset_unlock(&dev->mode_config.connection_mutex);
+    mutex_unlock(&dev->mode_config.mutex);
+    kfree(rd_buf);
+    return result;
+}
+
+
+/*
+ * function description: Set max_requested_bpc property on the connector
+ *
+ * This function will not force the input BPC on the connector; it will
+ * only change the max value. This is equivalent to setting max_bpc
+ * through xrandr.
+ *
+ * The BPC value written must be >= 6 and <= 16. Values outside of this
+ * range will result in errors.
+ *
+ * BPC values:
+ * 0x6 - 6 BPC
+ * 0x8 - 8 BPC
+ * 0xa - 10 BPC
+ * 0xc - 12 BPC
+ * 0x10 - 16 BPC
+ *
+ * Write the max_bpc in the following way:
+ *
+ * echo 0x6 > /sys/kernel/debug/dri/0/DP-X/max_bpc
+ *
+ */
+static ssize_t dp_max_bpc_write(struct file *f, const char __user *buf,
+                                size_t size, loff_t *pos)
+{
+    struct amdgpu_dm_connector *aconnector = file_inode(f)->i_private;
+    struct drm_connector *connector = &aconnector->base;
+    struct dm_connector_state *state;
+    struct drm_device *dev = connector->dev;
+    char *wr_buf = NULL;
+    uint32_t wr_buf_size = 42;
+    int max_param_num = 1;
+    long param[1] = {0};
+    uint8_t param_nums = 0;
+
+    if (size == 0)
+        return -EINVAL;
+
+    wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+
+    if (!wr_buf) {
+        DRM_DEBUG_DRIVER("no memory to allocate write buffer\n");
+        return -ENOSPC;
+    }
+
+    if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+                                       (long *)param, buf,
+                                       max_param_num,
+                                       &param_nums)) {
+        kfree(wr_buf);
+        return -EINVAL;
+    }
+
+    if (param_nums <= 0) {
+        DRM_DEBUG_DRIVER("user data could not be read\n");
+        kfree(wr_buf);
+        return -EINVAL;
+    }
+
+    if (param[0] < 6 || param[0] > 16) {
+        DRM_DEBUG_DRIVER("bad max_bpc value\n");
+        kfree(wr_buf);
+        return -EINVAL;
+    }
+
+    mutex_lock(&dev->mode_config.mutex);
+    drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+
+    if (connector->state == NULL)
+        goto unlock;
+
+    state = to_dm_connector_state(connector->state);
+    state->base.max_requested_bpc = param[0];
+unlock:
+    drm_modeset_unlock(&dev->mode_config.connection_mutex);
+    mutex_unlock(&dev->mode_config.mutex);
+
+    kfree(wr_buf);
+    return size;
+}
+
+/*
+ * Backlight at this moment. Read only.
+ * As written to display, taking ABM and backlight lut into account.
+ * Ranges from 0x0 to 0x10000 (= 100% PWM)
+ *
+ * Example usage: cat /sys/kernel/debug/dri/0/eDP-1/current_backlight
+ */
+static int current_backlight_show(struct seq_file *m, void *unused)
+{
+    struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private);
+    struct dc_link *link = aconnector->dc_link;
+    unsigned int backlight;
+
+    backlight = dc_link_get_backlight_level(link);
+    seq_printf(m, "0x%x\n", backlight);
+
+    return 0;
+}
+
+/*
+ * Backlight value that is being approached. Read only.
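+ * (i.e. the endpoint of the backlight ramp; current_backlight
+ * above reports the instantaneous level.)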
+ * As written to display, taking ABM and backlight lut into account. + * Ranges from 0x0 to 0x10000 (= 100% PWM) + * + * Example usage: cat /sys/kernel/debug/dri/0/eDP-1/target_backlight + */ +static int target_backlight_show(struct seq_file *m, void *unused) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private); + struct dc_link *link = aconnector->dc_link; + unsigned int backlight; + + backlight = dc_link_get_target_backlight_pwm(link); + seq_printf(m, "0x%x\n", backlight); + + return 0; +} + +/* + * function description: Determine if the connector is mst connector + * + * This function helps to determine whether a connector is a mst connector. + * - "root" stands for the root connector of the topology + * - "branch" stands for branch device of the topology + * - "end" stands for leaf node connector of the topology + * - "no" stands for the connector is not a device of a mst topology + * Access it with the following command: + * + * cat /sys/kernel/debug/dri/0/DP-X/is_mst_connector + * + */ +static int dp_is_mst_connector_show(struct seq_file *m, void *unused) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct drm_dp_mst_topology_mgr *mgr = NULL; + struct drm_dp_mst_port *port = NULL; + char *role = NULL; + + mutex_lock(&aconnector->hpd_lock); + + if (aconnector->mst_mgr.mst_state) { + role = "root"; + } else if (aconnector->mst_root && + aconnector->mst_root->mst_mgr.mst_state) { + + role = "end"; + + mgr = &aconnector->mst_root->mst_mgr; + port = aconnector->mst_output_port; + + drm_modeset_lock(&mgr->base.lock, NULL); + if (port->pdt == DP_PEER_DEVICE_MST_BRANCHING && + port->mcs) + role = "branch"; + drm_modeset_unlock(&mgr->base.lock); + + } else { + role = "no"; + } + + seq_printf(m, "%s\n", role); + + mutex_unlock(&aconnector->hpd_lock); + + return 0; +} + +/* + * function description: Read out the mst progress status + * + * This function helps to determine the mst progress status of + * a mst connector. + * + * Access it with the following command: + * + * cat /sys/kernel/debug/dri/0/DP-X/mst_progress_status + * + */ +static int dp_mst_progress_status_show(struct seq_file *m, void *unused) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct amdgpu_device *adev = drm_to_adev(connector->dev); + int i; + + mutex_lock(&aconnector->hpd_lock); + mutex_lock(&adev->dm.dc_lock); + + if (aconnector->mst_status == MST_STATUS_DEFAULT) { + seq_puts(m, "disabled\n"); + } else { + for (i = 0; i < sizeof(mst_progress_status)/sizeof(char *); i++) + seq_printf(m, "%s:%s\n", + mst_progress_status[i], + aconnector->mst_status & BIT(i) ? "done" : "not_done"); + } + + mutex_unlock(&adev->dm.dc_lock); + mutex_unlock(&aconnector->hpd_lock); + + return 0; +} + +/* + * Reports whether the connected display is a USB4 DPIA tunneled display + * Example usage: cat /sys/kernel/debug/dri/0/DP-8/is_dpia_link + */ +static int is_dpia_link_show(struct seq_file *m, void *data) +{ + struct drm_connector *connector = m->private; + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector); + struct dc_link *link = aconnector->dc_link; + + if (connector->status != connector_status_connected) + return -ENODEV; + + seq_printf(m, "%s\n", (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? "yes" : + (link->ep_type == DISPLAY_ENDPOINT_PHY) ? 
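+               /* USB4 DPIA tunnel -> "yes", direct PHY -> "no",
+                * any other endpoint type -> "unknown" */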
"no" : "unknown"); + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(dp_dsc_fec_support); +DEFINE_SHOW_ATTRIBUTE(dmub_fw_state); +DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer); +DEFINE_SHOW_ATTRIBUTE(dp_lttpr_status); +DEFINE_SHOW_ATTRIBUTE(hdcp_sink_capability); +DEFINE_SHOW_ATTRIBUTE(internal_display); +DEFINE_SHOW_ATTRIBUTE(psr_capability); +DEFINE_SHOW_ATTRIBUTE(dp_is_mst_connector); +DEFINE_SHOW_ATTRIBUTE(dp_mst_progress_status); +DEFINE_SHOW_ATTRIBUTE(is_dpia_link); + +static const struct file_operations dp_dsc_clock_en_debugfs_fops = { + .owner = THIS_MODULE, + .read = dp_dsc_clock_en_read, + .write = dp_dsc_clock_en_write, + .llseek = default_llseek +}; + +static const struct file_operations dp_dsc_slice_width_debugfs_fops = { + .owner = THIS_MODULE, + .read = dp_dsc_slice_width_read, + .write = dp_dsc_slice_width_write, + .llseek = default_llseek +}; + +static const struct file_operations dp_dsc_slice_height_debugfs_fops = { + .owner = THIS_MODULE, + .read = dp_dsc_slice_height_read, + .write = dp_dsc_slice_height_write, + .llseek = default_llseek +}; + +static const struct file_operations dp_dsc_bits_per_pixel_debugfs_fops = { + .owner = THIS_MODULE, + .read = dp_dsc_bits_per_pixel_read, + .write = dp_dsc_bits_per_pixel_write, + .llseek = default_llseek +}; + +static const struct file_operations dp_dsc_pic_width_debugfs_fops = { + .owner = THIS_MODULE, + .read = dp_dsc_pic_width_read, + .llseek = default_llseek +}; + +static const struct file_operations dp_dsc_pic_height_debugfs_fops = { + .owner = THIS_MODULE, + .read = dp_dsc_pic_height_read, + .llseek = default_llseek +}; + +static const struct file_operations dp_dsc_chunk_size_debugfs_fops = { + .owner = THIS_MODULE, + .read = dp_dsc_chunk_size_read, + .llseek = default_llseek +}; + +static const struct file_operations dp_dsc_slice_bpg_offset_debugfs_fops = { + .owner = THIS_MODULE, + .read = dp_dsc_slice_bpg_offset_read, + .llseek = default_llseek +}; + +static const struct file_operations trigger_hotplug_debugfs_fops = { + .owner = THIS_MODULE, + .write = trigger_hotplug, + .llseek = default_llseek +}; + +static const struct file_operations dp_link_settings_debugfs_fops = { + .owner = THIS_MODULE, + .read = dp_link_settings_read, + .write = dp_link_settings_write, + .llseek = default_llseek +}; + +static const struct file_operations dp_phy_settings_debugfs_fop = { + .owner = THIS_MODULE, + .read = dp_phy_settings_read, + .write = dp_phy_settings_write, + .llseek = default_llseek +}; + +static const struct file_operations dp_phy_test_pattern_fops = { + .owner = THIS_MODULE, + .write = dp_phy_test_pattern_debugfs_write, + .llseek = default_llseek +}; + +static const struct file_operations sdp_message_fops = { + .owner = THIS_MODULE, + .write = dp_sdp_message_debugfs_write, + .llseek = default_llseek +}; + +static const struct file_operations dp_max_bpc_debugfs_fops = { + .owner = THIS_MODULE, + .read = dp_max_bpc_read, + .write = dp_max_bpc_write, + .llseek = default_llseek +}; + +static const struct file_operations dp_dsc_disable_passthrough_debugfs_fops = { + .owner = THIS_MODULE, + .write = dp_dsc_passthrough_set, + .llseek = default_llseek +}; + +static const struct file_operations dp_mst_link_settings_debugfs_fops = { + .owner = THIS_MODULE, + .write = dp_mst_link_setting, + .llseek = default_llseek +}; + +static const struct { + char *name; + const struct file_operations *fops; +} dp_debugfs_entries[] = { + {"link_settings", &dp_link_settings_debugfs_fops}, + {"phy_settings", &dp_phy_settings_debugfs_fop}, + {"lttpr_status", 
&dp_lttpr_status_fops}, + {"test_pattern", &dp_phy_test_pattern_fops}, + {"hdcp_sink_capability", &hdcp_sink_capability_fops}, + {"sdp_message", &sdp_message_fops}, + {"dsc_clock_en", &dp_dsc_clock_en_debugfs_fops}, + {"dsc_slice_width", &dp_dsc_slice_width_debugfs_fops}, + {"dsc_slice_height", &dp_dsc_slice_height_debugfs_fops}, + {"dsc_bits_per_pixel", &dp_dsc_bits_per_pixel_debugfs_fops}, + {"dsc_pic_width", &dp_dsc_pic_width_debugfs_fops}, + {"dsc_pic_height", &dp_dsc_pic_height_debugfs_fops}, + {"dsc_chunk_size", &dp_dsc_chunk_size_debugfs_fops}, + {"dsc_slice_bpg", &dp_dsc_slice_bpg_offset_debugfs_fops}, + {"dp_dsc_fec_support", &dp_dsc_fec_support_fops}, + {"max_bpc", &dp_max_bpc_debugfs_fops}, + {"dsc_disable_passthrough", &dp_dsc_disable_passthrough_debugfs_fops}, + {"is_mst_connector", &dp_is_mst_connector_fops}, + {"mst_progress_status", &dp_mst_progress_status_fops}, + {"is_dpia_link", &is_dpia_link_fops}, + {"mst_link_settings", &dp_mst_link_settings_debugfs_fops} +}; + +static const struct { + char *name; + const struct file_operations *fops; +} hdmi_debugfs_entries[] = { + {"hdcp_sink_capability", &hdcp_sink_capability_fops} +}; + +/* + * Force YUV420 output if available from the given mode + */ +static int force_yuv420_output_set(void *data, u64 val) +{ + struct amdgpu_dm_connector *connector = data; + + connector->force_yuv420_output = (bool)val; + + return 0; +} + +/* + * Check if YUV420 is forced when available from the given mode + */ +static int force_yuv420_output_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + + *val = connector->force_yuv420_output; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(force_yuv420_output_fops, force_yuv420_output_get, + force_yuv420_output_set, "%llu\n"); + +/* + * Read PSR state + */ +static int psr_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + struct dc_link *link = connector->dc_link; + enum dc_psr_state state = PSR_STATE0; + + dc_link_get_psr_state(link, &state); + + *val = state; + + return 0; +} + +/* + * Read PSR state residency + */ +static int psr_read_residency(void *data, u64 *val) +{ + struct amdgpu_dm_connector *connector = data; + struct dc_link *link = connector->dc_link; + u32 residency; + + link->dc->link_srv->edp_get_psr_residency(link, &residency); + + *val = (u64)residency; + + return 0; +} + +/* read allow_edp_hotplug_detection */ +static int allow_edp_hotplug_detection_get(void *data, u64 *val) +{ + struct amdgpu_dm_connector *aconnector = data; + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + + *val = adev->dm.dc->config.allow_edp_hotplug_detection; + + return 0; +} + +/* set allow_edp_hotplug_detection */ +static int allow_edp_hotplug_detection_set(void *data, u64 val) +{ + struct amdgpu_dm_connector *aconnector = data; + struct drm_connector *connector = &aconnector->base; + struct drm_device *dev = connector->dev; + struct amdgpu_device *adev = drm_to_adev(dev); + + adev->dm.dc->config.allow_edp_hotplug_detection = (uint32_t) val; + + return 0; +} + +/* + * Set dmcub trace event IRQ enable or disable. 
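+ * Values other than 0 and 1 are ignored.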
+ * Usage to enable dmcub trace event IRQ: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en + * Usage to disable dmcub trace event IRQ: echo 0 > /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en + */ +static int dmcub_trace_event_state_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + + if (val == 1 || val == 0) { + dc_dmub_trace_event_control(adev->dm.dc, val); + adev->dm.dmcub_trace_event_en = (bool)val; + } else + return 0; + + return 0; +} + +/* + * The interface doesn't need get function, so it will return the + * value of zero + * Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmcub_trace_event_en + */ +static int dmcub_trace_event_state_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = data; + + *val = adev->dm.dmcub_trace_event_en; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(dmcub_trace_event_state_fops, dmcub_trace_event_state_get, + dmcub_trace_event_state_set, "%llu\n"); + +DEFINE_DEBUGFS_ATTRIBUTE(psr_fops, psr_get, NULL, "%llu\n"); +DEFINE_DEBUGFS_ATTRIBUTE(psr_residency_fops, psr_read_residency, NULL, + "%llu\n"); + +DEFINE_DEBUGFS_ATTRIBUTE(allow_edp_hotplug_detection_fops, + allow_edp_hotplug_detection_get, + allow_edp_hotplug_detection_set, "%llu\n"); + +DEFINE_SHOW_ATTRIBUTE(current_backlight); +DEFINE_SHOW_ATTRIBUTE(target_backlight); + +static const struct { + char *name; + const struct file_operations *fops; +} connector_debugfs_entries[] = { + {"force_yuv420_output", &force_yuv420_output_fops}, + {"trigger_hotplug", &trigger_hotplug_debugfs_fops}, + {"internal_display", &internal_display_fops} +}; + +/* + * Returns supported customized link rates by this eDP panel. + * Example usage: cat /sys/kernel/debug/dri/0/eDP-x/ilr_setting + */ +static int edp_ilr_show(struct seq_file *m, void *unused) +{ + struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(m->private); + struct dc_link *link = aconnector->dc_link; + uint8_t supported_link_rates[16]; + uint32_t link_rate_in_khz; + uint32_t entry = 0; + uint8_t dpcd_rev; + + memset(supported_link_rates, 0, sizeof(supported_link_rates)); + dm_helpers_dp_read_dpcd(link->ctx, link, DP_SUPPORTED_LINK_RATES, + supported_link_rates, sizeof(supported_link_rates)); + + dpcd_rev = link->dpcd_caps.dpcd_rev.raw; + + if (dpcd_rev >= DP_DPCD_REV_13 && + (supported_link_rates[entry+1] != 0 || supported_link_rates[entry] != 0)) { + + for (entry = 0; entry < 16; entry += 2) { + link_rate_in_khz = (supported_link_rates[entry+1] * 0x100 + + supported_link_rates[entry]) * 200; + seq_printf(m, "[%d] %d kHz\n", entry/2, link_rate_in_khz); + } + } else { + seq_puts(m, "ILR is not supported by this eDP panel.\n"); + } + + return 0; +} + +/* + * Set supported customized link rate to eDP panel. + * + * echo <lane_count> <link_rate option> > ilr_setting + * + * for example, supported ILR : [0] 1620000 kHz [1] 2160000 kHz [2] 2430000 kHz ... 
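+ * The link_rate option is an index into the table printed by
+ * reading ilr_setting, not a rate value: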
+ * echo 4 1 > /sys/kernel/debug/dri/0/eDP-x/ilr_setting
+ * to set 4 lanes and 2.16 GHz
+ */
+static ssize_t edp_ilr_write(struct file *f, const char __user *buf,
+                             size_t size, loff_t *pos)
+{
+    struct amdgpu_dm_connector *connector = file_inode(f)->i_private;
+    struct dc_link *link = connector->dc_link;
+    struct amdgpu_device *adev = drm_to_adev(connector->base.dev);
+    struct dc *dc = (struct dc *)link->dc;
+    struct dc_link_settings prefer_link_settings;
+    char *wr_buf = NULL;
+    const uint32_t wr_buf_size = 40;
+    /* 0: lane_count; 1: link_rate */
+    int max_param_num = 2;
+    uint8_t param_nums = 0;
+    long param[2];
+    bool valid_input = true;
+
+    if (size == 0)
+        return -EINVAL;
+
+    wr_buf = kcalloc(wr_buf_size, sizeof(char), GFP_KERNEL);
+    if (!wr_buf)
+        return -ENOMEM;
+
+    if (parse_write_buffer_into_params(wr_buf, wr_buf_size,
+                                       (long *)param, buf,
+                                       max_param_num,
+                                       &param_nums)) {
+        kfree(wr_buf);
+        return -EINVAL;
+    }
+
+    if (param_nums <= 0) {
+        kfree(wr_buf);
+        return -EINVAL;
+    }
+
+    switch (param[0]) {
+    case LANE_COUNT_ONE:
+    case LANE_COUNT_TWO:
+    case LANE_COUNT_FOUR:
+        break;
+    default:
+        valid_input = false;
+        break;
+    }
+
+    if (param[1] >= link->dpcd_caps.edp_supported_link_rates_count)
+        valid_input = false;
+
+    if (!valid_input) {
+        kfree(wr_buf);
+        DRM_DEBUG_DRIVER("Invalid Input value. No HW will be programmed\n");
+        prefer_link_settings.use_link_rate_set = false;
+        mutex_lock(&adev->dm.dc_lock);
+        dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false);
+        mutex_unlock(&adev->dm.dc_lock);
+        return size;
+    }
+
+    /* save user force lane_count, link_rate to preferred settings
+     * spread spectrum will not be changed
+     */
+    prefer_link_settings.link_spread = link->cur_link_settings.link_spread;
+    prefer_link_settings.lane_count = param[0];
+    prefer_link_settings.use_link_rate_set = true;
+    prefer_link_settings.link_rate_set = param[1];
+    prefer_link_settings.link_rate = link->dpcd_caps.edp_supported_link_rates[param[1]];
+
+    mutex_lock(&adev->dm.dc_lock);
+    dc_link_set_preferred_training_settings(dc, &prefer_link_settings,
+                                            NULL, link, false);
+    mutex_unlock(&adev->dm.dc_lock);
+
+    kfree(wr_buf);
+    return size;
+}
+
+static int edp_ilr_open(struct inode *inode, struct file *file)
+{
+    return single_open(file, edp_ilr_show, inode->i_private);
+}
+
+static const struct file_operations edp_ilr_debugfs_fops = {
+    .owner = THIS_MODULE,
+    .open = edp_ilr_open,
+    .read = seq_read,
+    .llseek = seq_lseek,
+    .release = single_release,
+    .write = edp_ilr_write
+};
+
+void connector_debugfs_init(struct amdgpu_dm_connector *connector)
+{
+    int i;
+    struct dentry *dir = connector->base.debugfs_entry;
+
+    if (connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+        connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+        for (i = 0; i < ARRAY_SIZE(dp_debugfs_entries); i++) {
+            debugfs_create_file(dp_debugfs_entries[i].name,
+                                0644, dir, connector,
+                                dp_debugfs_entries[i].fops);
+        }
+    }
+    if (connector->base.connector_type == DRM_MODE_CONNECTOR_eDP) {
+        debugfs_create_file_unsafe("psr_capability", 0444, dir, connector, &psr_capability_fops);
+        debugfs_create_file_unsafe("psr_state", 0444, dir, connector, &psr_fops);
+        debugfs_create_file_unsafe("psr_residency", 0444, dir,
+                                   connector, &psr_residency_fops);
+        debugfs_create_file("amdgpu_current_backlight_pwm", 0444, dir, connector,
+                            &current_backlight_fops);
+        debugfs_create_file("amdgpu_target_backlight_pwm", 0444, dir, connector,
+                            &target_backlight_fops);
+        debugfs_create_file("ilr_setting", 0644, dir, connector,
+
&edp_ilr_debugfs_fops); + debugfs_create_file("allow_edp_hotplug_detection", 0644, dir, connector, + &allow_edp_hotplug_detection_fops); + } + + for (i = 0; i < ARRAY_SIZE(connector_debugfs_entries); i++) { + debugfs_create_file(connector_debugfs_entries[i].name, + 0644, dir, connector, + connector_debugfs_entries[i].fops); + } + + if (connector->base.connector_type == DRM_MODE_CONNECTOR_HDMIA) { + for (i = 0; i < ARRAY_SIZE(hdmi_debugfs_entries); i++) { + debugfs_create_file(hdmi_debugfs_entries[i].name, + 0644, dir, connector, + hdmi_debugfs_entries[i].fops); + } + } +} + +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY +/* + * Set crc window coordinate x start + */ +static int crc_win_x_start_set(void *data, u64 val) +{ + struct drm_crtc *crtc = data; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + acrtc->dm_irq_params.window_param.x_start = (uint16_t) val; + acrtc->dm_irq_params.window_param.update_win = false; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +/* + * Get crc window coordinate x start + */ +static int crc_win_x_start_get(void *data, u64 *val) +{ + struct drm_crtc *crtc = data; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + *val = acrtc->dm_irq_params.window_param.x_start; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(crc_win_x_start_fops, crc_win_x_start_get, + crc_win_x_start_set, "%llu\n"); + + +/* + * Set crc window coordinate y start + */ +static int crc_win_y_start_set(void *data, u64 val) +{ + struct drm_crtc *crtc = data; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + acrtc->dm_irq_params.window_param.y_start = (uint16_t) val; + acrtc->dm_irq_params.window_param.update_win = false; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +/* + * Get crc window coordinate y start + */ +static int crc_win_y_start_get(void *data, u64 *val) +{ + struct drm_crtc *crtc = data; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + *val = acrtc->dm_irq_params.window_param.y_start; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_start_fops, crc_win_y_start_get, + crc_win_y_start_set, "%llu\n"); + +/* + * Set crc window coordinate x end + */ +static int crc_win_x_end_set(void *data, u64 val) +{ + struct drm_crtc *crtc = data; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + acrtc->dm_irq_params.window_param.x_end = (uint16_t) val; + acrtc->dm_irq_params.window_param.update_win = false; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +/* + * Get crc window coordinate x end + */ +static int crc_win_x_end_get(void *data, u64 *val) +{ + struct drm_crtc *crtc = data; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + *val = acrtc->dm_irq_params.window_param.x_end; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(crc_win_x_end_fops, crc_win_x_end_get, + crc_win_x_end_set, "%llu\n"); + +/* + * Set crc window coordinate y end + */ +static int crc_win_y_end_set(void *data, u64 val) +{ + struct drm_crtc *crtc = data; + struct 
drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + acrtc->dm_irq_params.window_param.y_end = (uint16_t) val; + acrtc->dm_irq_params.window_param.update_win = false; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +/* + * Get crc window coordinate y end + */ +static int crc_win_y_end_get(void *data, u64 *val) +{ + struct drm_crtc *crtc = data; + struct drm_device *drm_dev = crtc->dev; + struct amdgpu_crtc *acrtc = to_amdgpu_crtc(crtc); + + spin_lock_irq(&drm_dev->event_lock); + *val = acrtc->dm_irq_params.window_param.y_end; + spin_unlock_irq(&drm_dev->event_lock); + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(crc_win_y_end_fops, crc_win_y_end_get, + crc_win_y_end_set, "%llu\n"); +/* + * Trigger to commit crc window + */ +static int crc_win_update_set(void *data, u64 val) +{ + struct drm_crtc *crtc = data; + struct amdgpu_crtc *acrtc; + struct amdgpu_device *adev = drm_to_adev(crtc->dev); + + if (val) { + acrtc = to_amdgpu_crtc(crtc); + mutex_lock(&adev->dm.dc_lock); + /* PSR may write to OTG CRC window control register, + * so close it before starting secure_display. + */ + amdgpu_dm_psr_disable(acrtc->dm_irq_params.stream); + + spin_lock_irq(&adev_to_drm(adev)->event_lock); + + acrtc->dm_irq_params.window_param.activated = true; + acrtc->dm_irq_params.window_param.update_win = true; + acrtc->dm_irq_params.window_param.skip_frame_cnt = 0; + + spin_unlock_irq(&adev_to_drm(adev)->event_lock); + mutex_unlock(&adev->dm.dc_lock); + } + + return 0; +} + +/* + * Get crc window update flag + */ +static int crc_win_update_get(void *data, u64 *val) +{ + *val = 0; + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(crc_win_update_fops, crc_win_update_get, + crc_win_update_set, "%llu\n"); +#endif +void crtc_debugfs_init(struct drm_crtc *crtc) +{ +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + struct dentry *dir = debugfs_lookup("crc", crtc->debugfs_entry); + + if (!dir) + return; + + debugfs_create_file_unsafe("crc_win_x_start", 0644, dir, crtc, + &crc_win_x_start_fops); + debugfs_create_file_unsafe("crc_win_y_start", 0644, dir, crtc, + &crc_win_y_start_fops); + debugfs_create_file_unsafe("crc_win_x_end", 0644, dir, crtc, + &crc_win_x_end_fops); + debugfs_create_file_unsafe("crc_win_y_end", 0644, dir, crtc, + &crc_win_y_end_fops); + debugfs_create_file_unsafe("crc_win_update", 0644, dir, crtc, + &crc_win_update_fops); + dput(dir); +#endif + debugfs_create_file("amdgpu_current_bpc", 0644, crtc->debugfs_entry, + crtc, &amdgpu_current_bpc_fops); + debugfs_create_file("amdgpu_current_colorspace", 0644, crtc->debugfs_entry, + crtc, &amdgpu_current_colorspace_fops); +} + +/* + * Writes DTN log state to the user supplied buffer. + * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log + */ +static ssize_t dtn_log_read( + struct file *f, + char __user *buf, + size_t size, + loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + struct dc *dc = adev->dm.dc; + struct dc_log_buffer_ctx log_ctx = { 0 }; + ssize_t result = 0; + + if (!buf || !size) + return -EINVAL; + + if (!dc->hwss.log_hw_state) + return 0; + + dc->hwss.log_hw_state(dc, &log_ctx); + + if (*pos < log_ctx.pos) { + size_t to_copy = log_ctx.pos - *pos; + + to_copy = min(to_copy, size); + + if (!copy_to_user(buf, log_ctx.buf + *pos, to_copy)) { + *pos += to_copy; + result = to_copy; + } + } + + kfree(log_ctx.buf); + + return result; +} + +/* + * Writes DTN log state to dmesg when triggered via a write. 
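+ * The written payload itself is ignored; any non-empty write
+ * triggers the dump.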
+ * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dtn_log
+ */
+static ssize_t dtn_log_write(
+    struct file *f,
+    const char __user *buf,
+    size_t size,
+    loff_t *pos)
+{
+    struct amdgpu_device *adev = file_inode(f)->i_private;
+    struct dc *dc = adev->dm.dc;
+
+    /* Write triggers log output via dmesg. */
+    if (size == 0)
+        return 0;
+
+    if (dc->hwss.log_hw_state)
+        dc->hwss.log_hw_state(dc, NULL);
+
+    return size;
+}
+
+static int mst_topo_show(struct seq_file *m, void *unused)
+{
+    struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+    struct drm_device *dev = adev_to_drm(adev);
+    struct drm_connector *connector;
+    struct drm_connector_list_iter conn_iter;
+    struct amdgpu_dm_connector *aconnector;
+
+    drm_connector_list_iter_begin(dev, &conn_iter);
+    drm_for_each_connector_iter(connector, &conn_iter) {
+        if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+            continue;
+
+        aconnector = to_amdgpu_dm_connector(connector);
+
+        /* Ensure we're only dumping the topology of a root mst node */
+        if (!aconnector->mst_mgr.mst_state)
+            continue;
+
+        seq_printf(m, "\nMST topology for connector %d\n", aconnector->connector_id);
+        drm_dp_mst_dump_topology(m, &aconnector->mst_mgr);
+    }
+    drm_connector_list_iter_end(&conn_iter);
+
+    return 0;
+}
+
+/*
+ * Sets trigger hpd for MST topologies.
+ * All connected connectors will be rediscovered and restarted as needed if a val of 1 is sent.
+ * All topologies will be disconnected if a val of 0 is set.
+ * Usage to enable topologies: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst
+ * Usage to disable topologies: echo 0 > /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst
+ */
+static int trigger_hpd_mst_set(void *data, u64 val)
+{
+    struct amdgpu_device *adev = data;
+    struct drm_device *dev = adev_to_drm(adev);
+    struct drm_connector_list_iter iter;
+    struct amdgpu_dm_connector *aconnector;
+    struct drm_connector *connector;
+    struct dc_link *link = NULL;
+
+    if (val == 1) {
+        drm_connector_list_iter_begin(dev, &iter);
+        drm_for_each_connector_iter(connector, &iter) {
+            aconnector = to_amdgpu_dm_connector(connector);
+            if (aconnector->dc_link->type == dc_connection_mst_branch &&
+                aconnector->mst_mgr.aux) {
+                mutex_lock(&adev->dm.dc_lock);
+                dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+                mutex_unlock(&adev->dm.dc_lock);
+
+                drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+            }
+        }
+    } else if (val == 0) {
+        drm_connector_list_iter_begin(dev, &iter);
+        drm_for_each_connector_iter(connector, &iter) {
+            aconnector = to_amdgpu_dm_connector(connector);
+            if (!aconnector->dc_link)
+                continue;
+
+            if (!aconnector->mst_root)
+                continue;
+
+            link = aconnector->dc_link;
+            dc_link_dp_receiver_power_ctrl(link, false);
+            drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_root->mst_mgr, false);
+            link->mst_stream_alloc_table.stream_count = 0;
+            memset(link->mst_stream_alloc_table.stream_allocations, 0,
+                   sizeof(link->mst_stream_alloc_table.stream_allocations));
+        }
+    } else {
+        return 0;
+    }
+    drm_kms_helper_hotplug_event(dev);
+
+    return 0;
+}
+
+/*
+ * The interface doesn't need a get function, so it always returns
+ * a value of zero
+ * Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_trigger_hpd_mst
+ */
+static int trigger_hpd_mst_get(void *data, u64 *val)
+{
+    *val = 0;
+    return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(trigger_hpd_mst_ops, trigger_hpd_mst_get,
+                         trigger_hpd_mst_set, "%llu\n");
+
+
+/*
+ * Sets the force_timing_sync debug option from the given string.
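+ * Any non-zero value enables forced timing sync; zero disables it.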
+ * All connected displays will be force synchronized immediately. + * Usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_force_timing_sync + */ +static int force_timing_sync_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + + adev->dm.force_timing_sync = (bool)val; + + amdgpu_dm_trigger_timing_sync(adev_to_drm(adev)); + + return 0; +} + +/* + * Gets the force_timing_sync debug option value into the given buffer. + * Usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_force_timing_sync + */ +static int force_timing_sync_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = data; + + *val = adev->dm.force_timing_sync; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(force_timing_sync_ops, force_timing_sync_get, + force_timing_sync_set, "%llu\n"); + + +/* + * Disables all HPD and HPD RX interrupt handling in the + * driver when set to 1. Default is 0. + */ +static int disable_hpd_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + + adev->dm.disable_hpd_irq = (bool)val; + + return 0; +} + + +/* + * Returns 1 if HPD and HPRX interrupt handling is disabled, + * 0 otherwise. + */ +static int disable_hpd_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = data; + + *val = adev->dm.disable_hpd_irq; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(disable_hpd_ops, disable_hpd_get, + disable_hpd_set, "%llu\n"); + +/* + * Temporary w/a to force sst sequence in M42D DP2 mst receiver + * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_set_mst_en_for_sst + */ +static int dp_force_sst_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + + adev->dm.dc->debug.set_mst_en_for_sst = val; + + return 0; +} + +static int dp_force_sst_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = data; + + *val = adev->dm.dc->debug.set_mst_en_for_sst; + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(dp_set_mst_en_for_sst_ops, dp_force_sst_get, + dp_force_sst_set, "%llu\n"); + +/* + * Force DP2 sequence without VESA certified cable. + * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_dm_dp_ignore_cable_id + */ +static int dp_ignore_cable_id_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + + adev->dm.dc->debug.ignore_cable_id = val; + + return 0; +} + +static int dp_ignore_cable_id_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = data; + + *val = adev->dm.dc->debug.ignore_cable_id; + + return 0; +} +DEFINE_DEBUGFS_ATTRIBUTE(dp_ignore_cable_id_ops, dp_ignore_cable_id_get, + dp_ignore_cable_id_set, "%llu\n"); + +/* + * Sets the DC visual confirm debug option from the given string. + * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_visual_confirm + */ +static int visual_confirm_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + + adev->dm.dc->debug.visual_confirm = (enum visual_confirm)val; + + return 0; +} + +/* + * Reads the DC visual confirm debug option value into the given buffer. + * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_visual_confirm + */ +static int visual_confirm_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = data; + + *val = adev->dm.dc->debug.visual_confirm; + + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(mst_topo); +DEFINE_DEBUGFS_ATTRIBUTE(visual_confirm_fops, visual_confirm_get, + visual_confirm_set, "%llu\n"); + + +/* + * Sets the DC skip_detection_link_training debug option from the given string. 
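+ * Any non-zero write enables the option; zero disables it.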
+ * Example usage: echo 1 > /sys/kernel/debug/dri/0/amdgpu_skip_detection_link_training + */ +static int skip_detection_link_training_set(void *data, u64 val) +{ + struct amdgpu_device *adev = data; + + if (val == 0) + adev->dm.dc->debug.skip_detection_link_training = false; + else + adev->dm.dc->debug.skip_detection_link_training = true; + + return 0; +} + +/* + * Reads the DC skip_detection_link_training debug option value into the given buffer. + * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_skip_detection_link_training + */ +static int skip_detection_link_training_get(void *data, u64 *val) +{ + struct amdgpu_device *adev = data; + + *val = adev->dm.dc->debug.skip_detection_link_training; + + return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE(skip_detection_link_training_fops, + skip_detection_link_training_get, + skip_detection_link_training_set, "%llu\n"); + +/* + * Dumps the DCC_EN bit for each pipe. + * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dcc_en + */ +static ssize_t dcc_en_bits_read( + struct file *f, + char __user *buf, + size_t size, + loff_t *pos) +{ + struct amdgpu_device *adev = file_inode(f)->i_private; + struct dc *dc = adev->dm.dc; + char *rd_buf = NULL; + const uint32_t rd_buf_size = 32; + uint32_t result = 0; + int offset = 0; + int num_pipes = dc->res_pool->pipe_count; + int *dcc_en_bits; + int i, r; + + dcc_en_bits = kcalloc(num_pipes, sizeof(int), GFP_KERNEL); + if (!dcc_en_bits) + return -ENOMEM; + + if (!dc->hwss.get_dcc_en_bits) { + kfree(dcc_en_bits); + return 0; + } + + dc->hwss.get_dcc_en_bits(dc, dcc_en_bits); + + rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); + if (!rd_buf) { + kfree(dcc_en_bits); + return -ENOMEM; + } + + for (i = 0; i < num_pipes; i++) + offset += snprintf(rd_buf + offset, rd_buf_size - offset, + "%d ", dcc_en_bits[i]); + rd_buf[strlen(rd_buf)] = '\n'; + + kfree(dcc_en_bits); + + while (size) { + if (*pos >= rd_buf_size) + break; + r = put_user(*(rd_buf + result), buf); + if (r) { + kfree(rd_buf); + return r; /* r = -EFAULT */ + } + buf += 1; + size -= 1; + *pos += 1; + result += 1; + } + + kfree(rd_buf); + return result; +} + +void dtn_debugfs_init(struct amdgpu_device *adev) +{ + static const struct file_operations dtn_log_fops = { + .owner = THIS_MODULE, + .read = dtn_log_read, + .write = dtn_log_write, + .llseek = default_llseek + }; + static const struct file_operations dcc_en_bits_fops = { + .owner = THIS_MODULE, + .read = dcc_en_bits_read, + .llseek = default_llseek + }; + + struct drm_minor *minor = adev_to_drm(adev)->primary; + struct dentry *root = minor->debugfs_root; + + debugfs_create_file("amdgpu_mst_topology", 0444, root, + adev, &mst_topo_fops); + debugfs_create_file("amdgpu_dm_dtn_log", 0644, root, adev, + &dtn_log_fops); + debugfs_create_file("amdgpu_dm_dp_set_mst_en_for_sst", 0644, root, adev, + &dp_set_mst_en_for_sst_ops); + debugfs_create_file("amdgpu_dm_dp_ignore_cable_id", 0644, root, adev, + &dp_ignore_cable_id_ops); + + debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev, + &visual_confirm_fops); + + debugfs_create_file_unsafe("amdgpu_dm_skip_detection_link_training", 0644, root, adev, + &skip_detection_link_training_fops); + + debugfs_create_file_unsafe("amdgpu_dm_dmub_tracebuffer", 0644, root, + adev, &dmub_tracebuffer_fops); + + debugfs_create_file_unsafe("amdgpu_dm_dmub_fw_state", 0644, root, + adev, &dmub_fw_state_fops); + + debugfs_create_file_unsafe("amdgpu_dm_force_timing_sync", 0644, root, + adev, &force_timing_sync_ops); + + 
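+    /* All of these entries are device-global: they live directly
+     * under /sys/kernel/debug/dri/<minor>, unlike the per-connector
+     * files created in connector_debugfs_init().
+     */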
debugfs_create_file_unsafe("amdgpu_dm_dmcub_trace_event_en", 0644, root, + adev, &dmcub_trace_event_state_fops); + + debugfs_create_file_unsafe("amdgpu_dm_trigger_hpd_mst", 0644, root, + adev, &trigger_hpd_mst_ops); + + debugfs_create_file_unsafe("amdgpu_dm_dcc_en", 0644, root, adev, + &dcc_en_bits_fops); + + debugfs_create_file_unsafe("amdgpu_dm_disable_hpd", 0644, root, adev, + &disable_hpd_ops); + +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h new file mode 100644 index 0000000000..071200473c --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.h @@ -0,0 +1,36 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_DEBUGFS_H__ +#define __AMDGPU_DM_DEBUGFS_H__ + +#include "amdgpu.h" +#include "amdgpu_dm.h" + +void connector_debugfs_init(struct amdgpu_dm_connector *connector); +void dtn_debugfs_init(struct amdgpu_device *adev); +void crtc_debugfs_init(struct drm_crtc *crtc); + +#endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c new file mode 100644 index 0000000000..20cfc5be21 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -0,0 +1,779 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "amdgpu_dm_hdcp.h" +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "dm_helpers.h" +#include <drm/display/drm_hdcp_helper.h> +#include "hdcp_psp.h" + +/* + * If the SRM version being loaded is less than or equal to the + * currently loaded SRM, psp will return 0xFFFF as the version + */ +#define PSP_SRM_VERSION_MAX 0xFFFF + +static bool +lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + struct i2c_payload i2c_payloads[] = {{true, address, size, (void *)data} }; + struct i2c_command cmd = {i2c_payloads, 1, I2C_COMMAND_ENGINE_HW, + link->dc->caps.i2c_speed_in_khz}; + + return dm_helpers_submit_i2c(link->ctx, link, &cmd); +} + +static bool +lp_read_i2c(void *handle, uint32_t address, uint8_t offset, uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + struct i2c_payload i2c_payloads[] = {{true, address, 1, &offset}, + {false, address, size, data} }; + struct i2c_command cmd = {i2c_payloads, 2, I2C_COMMAND_ENGINE_HW, + link->dc->caps.i2c_speed_in_khz}; + + return dm_helpers_submit_i2c(link->ctx, link, &cmd); +} + +static bool +lp_write_dpcd(void *handle, uint32_t address, const uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + return dm_helpers_dp_write_dpcd(link->ctx, link, address, data, size); +} + +static bool +lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size) +{ + struct dc_link *link = handle; + + return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size); +} + +static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size) +{ + struct ta_hdcp_shared_memory *hdcp_cmd; + + if (!psp->hdcp_context.context.initialized) { + DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized."); + return NULL; + } + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_GET_SRM; + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS) + return NULL; + + *srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version; + *srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size; + + return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf; +} + +static int psp_set_srm(struct psp_context *psp, + u8 *srm, uint32_t srm_size, uint32_t *srm_version) +{ + struct ta_hdcp_shared_memory *hdcp_cmd; + + if (!psp->hdcp_context.context.initialized) { + DRM_WARN("Failed to get hdcp srm. 
HDCP TA is not initialized."); + return -EINVAL; + } + + hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; + memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + + memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size); + hdcp_cmd->in_msg.hdcp_set_srm.srm_buf_size = srm_size; + hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_SET_SRM; + + psp_hdcp_invoke(psp, hdcp_cmd->cmd_id); + + if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || + hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 || + hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX) + return -EINVAL; + + *srm_version = hdcp_cmd->out_msg.hdcp_set_srm.srm_version; + return 0; +} + +static void process_output(struct hdcp_workqueue *hdcp_work) +{ + struct mod_hdcp_output output = hdcp_work->output; + + if (output.callback_stop) + cancel_delayed_work(&hdcp_work->callback_dwork); + + if (output.callback_needed) + schedule_delayed_work(&hdcp_work->callback_dwork, + msecs_to_jiffies(output.callback_delay)); + + if (output.watchdog_timer_stop) + cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + + if (output.watchdog_timer_needed) + schedule_delayed_work(&hdcp_work->watchdog_timer_dwork, + msecs_to_jiffies(output.watchdog_timer_delay)); + + schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0)); +} + +static void link_lock(struct hdcp_workqueue *work, bool lock) +{ + int i = 0; + + for (i = 0; i < work->max_link; i++) { + if (lock) + mutex_lock(&work[i].mutex); + else + mutex_unlock(&work[i].mutex); + } +} + +void hdcp_update_display(struct hdcp_workqueue *hdcp_work, + unsigned int link_index, + struct amdgpu_dm_connector *aconnector, + u8 content_type, + bool enable_encryption) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + struct mod_hdcp_link_adjustment link_adjust; + struct mod_hdcp_display_adjustment display_adjust; + unsigned int conn_index = aconnector->base.index; + + mutex_lock(&hdcp_w->mutex); + hdcp_w->aconnector[conn_index] = aconnector; + + memset(&link_adjust, 0, sizeof(link_adjust)); + memset(&display_adjust, 0, sizeof(display_adjust)); + + if (enable_encryption) { + /* Explicitly set the saved SRM as sysfs call will be after we already enabled hdcp + * (s3 resume case) + */ + if (hdcp_work->srm_size > 0) + psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, + hdcp_work->srm_size, + &hdcp_work->srm_version); + + display_adjust.disable = MOD_HDCP_DISPLAY_NOT_DISABLE; + + link_adjust.auth_delay = 2; + + if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0) { + link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0; + } else if (content_type == DRM_MODE_HDCP_CONTENT_TYPE1) { + link_adjust.hdcp1.disable = 1; + link_adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_1; + } + + schedule_delayed_work(&hdcp_w->property_validate_dwork, + msecs_to_jiffies(DRM_HDCP_CHECK_PERIOD_MS)); + } else { + display_adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; + hdcp_w->encryption_status[conn_index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + cancel_delayed_work(&hdcp_w->property_validate_dwork); + } + + mod_hdcp_update_display(&hdcp_w->hdcp, conn_index, &link_adjust, &display_adjust, &hdcp_w->output); + + process_output(hdcp_w); + mutex_unlock(&hdcp_w->mutex); +} + +static void hdcp_remove_display(struct hdcp_workqueue *hdcp_work, + unsigned int link_index, + struct amdgpu_dm_connector *aconnector) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + struct drm_connector_state *conn_state = aconnector->base.state; 
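+    /* The DRM connector index doubles as this link's display slot below. */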
+ unsigned int conn_index = aconnector->base.index; + + mutex_lock(&hdcp_w->mutex); + hdcp_w->aconnector[conn_index] = aconnector; + + /* the removal of display will invoke auth reset -> hdcp destroy and + * we'd expect the Content Protection (CP) property changed back to + * DESIRED if at the time ENABLED. CP property change should occur + * before the element removed from linked list. + */ + if (conn_state && conn_state->content_protection == DRM_MODE_CONTENT_PROTECTION_ENABLED) { + conn_state->content_protection = DRM_MODE_CONTENT_PROTECTION_DESIRED; + + DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP 2 -> 1, type %u, DPMS %u\n", + aconnector->base.index, conn_state->hdcp_content_type, + aconnector->base.dpms); + } + + mod_hdcp_remove_display(&hdcp_w->hdcp, aconnector->base.index, &hdcp_w->output); + + process_output(hdcp_w); + mutex_unlock(&hdcp_w->mutex); +} + +void hdcp_reset_display(struct hdcp_workqueue *hdcp_work, unsigned int link_index) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + unsigned int conn_index; + + mutex_lock(&hdcp_w->mutex); + + mod_hdcp_reset_connection(&hdcp_w->hdcp, &hdcp_w->output); + + cancel_delayed_work(&hdcp_w->property_validate_dwork); + + for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { + hdcp_w->encryption_status[conn_index] = + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + } + + process_output(hdcp_w); + + mutex_unlock(&hdcp_w->mutex); +} + +void hdcp_handle_cpirq(struct hdcp_workqueue *hdcp_work, unsigned int link_index) +{ + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + + schedule_work(&hdcp_w->cpirq_work); +} + +static void event_callback(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(to_delayed_work(work), struct hdcp_workqueue, + callback_dwork); + + mutex_lock(&hdcp_work->mutex); + + cancel_delayed_work(&hdcp_work->callback_dwork); + + mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CALLBACK, + &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); +} + +static void event_property_update(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work = container_of(work, struct hdcp_workqueue, + property_update_work); + struct amdgpu_dm_connector *aconnector = NULL; + struct drm_device *dev; + long ret; + unsigned int conn_index; + struct drm_connector *connector; + struct drm_connector_state *conn_state; + + for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; conn_index++) { + aconnector = hdcp_work->aconnector[conn_index]; + + if (!aconnector) + continue; + + connector = &aconnector->base; + + /* check if display connected */ + if (connector->status != connector_status_connected) + continue; + + conn_state = aconnector->base.state; + + if (!conn_state) + continue; + + dev = connector->dev; + + if (!dev) + continue; + + drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); + mutex_lock(&hdcp_work->mutex); + + if (conn_state->commit) { + ret = wait_for_completion_interruptible_timeout(&conn_state->commit->hw_done, + 10 * HZ); + if (ret == 0) { + DRM_ERROR("HDCP state unknown! 
Setting it to DESIRED\n"); + hdcp_work->encryption_status[conn_index] = + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + } + } + if (hdcp_work->encryption_status[conn_index] != + MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF) { + if (conn_state->hdcp_content_type == + DRM_MODE_HDCP_CONTENT_TYPE0 && + hdcp_work->encryption_status[conn_index] <= + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE0_ON) { + DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_ENABLED\n"); + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED); + } else if (conn_state->hdcp_content_type == + DRM_MODE_HDCP_CONTENT_TYPE1 && + hdcp_work->encryption_status[conn_index] == + MOD_HDCP_ENCRYPTION_STATUS_HDCP2_TYPE1_ON) { + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_ENABLED); + } + } else { + DRM_DEBUG_DRIVER("[HDCP_DM] DRM_MODE_CONTENT_PROTECTION_DESIRED\n"); + drm_hdcp_update_content_protection(connector, + DRM_MODE_CONTENT_PROTECTION_DESIRED); + } + mutex_unlock(&hdcp_work->mutex); + drm_modeset_unlock(&dev->mode_config.connection_mutex); + } +} + +static void event_property_validate(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work = + container_of(to_delayed_work(work), struct hdcp_workqueue, property_validate_dwork); + struct mod_hdcp_display_query query; + struct amdgpu_dm_connector *aconnector; + unsigned int conn_index; + + mutex_lock(&hdcp_work->mutex); + + for (conn_index = 0; conn_index < AMDGPU_DM_MAX_DISPLAY_INDEX; + conn_index++) { + aconnector = hdcp_work->aconnector[conn_index]; + + if (!aconnector) + continue; + + /* check if display connected */ + if (aconnector->base.status != connector_status_connected) + continue; + + if (!aconnector->base.state) + continue; + + query.encryption_status = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + mod_hdcp_query_display(&hdcp_work->hdcp, aconnector->base.index, + &query); + + DRM_DEBUG_DRIVER("[HDCP_DM] disp %d, connector->CP %u, (query, work): (%d, %d)\n", + aconnector->base.index, + aconnector->base.state->content_protection, + query.encryption_status, + hdcp_work->encryption_status[conn_index]); + + if (query.encryption_status != + hdcp_work->encryption_status[conn_index]) { + DRM_DEBUG_DRIVER("[HDCP_DM] encryption_status change from %x to %x\n", + hdcp_work->encryption_status[conn_index], + query.encryption_status); + + hdcp_work->encryption_status[conn_index] = + query.encryption_status; + + DRM_DEBUG_DRIVER("[HDCP_DM] trigger property_update_work\n"); + + schedule_work(&hdcp_work->property_update_work); + } + } + + mutex_unlock(&hdcp_work->mutex); +} + +static void event_watchdog_timer(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(to_delayed_work(work), + struct hdcp_workqueue, + watchdog_timer_dwork); + + mutex_lock(&hdcp_work->mutex); + + cancel_delayed_work(&hdcp_work->watchdog_timer_dwork); + + mod_hdcp_process_event(&hdcp_work->hdcp, + MOD_HDCP_EVENT_WATCHDOG_TIMEOUT, + &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); +} + +static void event_cpirq(struct work_struct *work) +{ + struct hdcp_workqueue *hdcp_work; + + hdcp_work = container_of(work, struct hdcp_workqueue, cpirq_work); + + mutex_lock(&hdcp_work->mutex); + + mod_hdcp_process_event(&hdcp_work->hdcp, MOD_HDCP_EVENT_CPIRQ, &hdcp_work->output); + + process_output(hdcp_work); + + mutex_unlock(&hdcp_work->mutex); +} + +void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *hdcp_work) +{ + int i = 0; + + for (i = 0; i < hdcp_work->max_link; i++) { + 
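+        /* Flush pending callback/watchdog work; the memory backing
+         * these work items is freed below.
+         */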
cancel_delayed_work_sync(&hdcp_work[i].callback_dwork); + cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork); + } + + sysfs_remove_bin_file(kobj, &hdcp_work[0].attr); + kfree(hdcp_work->srm); + kfree(hdcp_work->srm_temp); + kfree(hdcp_work); +} + +static bool enable_assr(void *handle, struct dc_link *link) +{ + struct hdcp_workqueue *hdcp_work = handle; + struct mod_hdcp hdcp = hdcp_work->hdcp; + struct psp_context *psp = hdcp.config.psp.handle; + struct ta_dtm_shared_memory *dtm_cmd; + bool res = true; + + if (!psp->dtm_context.context.initialized) { + DRM_INFO("Failed to enable ASSR, DTM TA is not initialized."); + return false; + } + + dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.context.mem_context.shared_buf; + + mutex_lock(&psp->dtm_context.mutex); + memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory)); + + dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_ASSR_ENABLE; + dtm_cmd->dtm_in_message.topology_assr_enable.display_topology_dig_be_index = + link->link_enc_hw_inst; + dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE; + + psp_dtm_invoke(psp, dtm_cmd->cmd_id); + + if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) { + DRM_INFO("Failed to enable ASSR"); + res = false; + } + + mutex_unlock(&psp->dtm_context.mutex); + + return res; +} + +static void update_config(void *handle, struct cp_psp_stream_config *config) +{ + struct hdcp_workqueue *hdcp_work = handle; + struct amdgpu_dm_connector *aconnector = config->dm_stream_ctx; + int link_index = aconnector->dc_link->link_index; + struct mod_hdcp_display *display = &hdcp_work[link_index].display; + struct mod_hdcp_link *link = &hdcp_work[link_index].link; + struct hdcp_workqueue *hdcp_w = &hdcp_work[link_index]; + struct dc_sink *sink = NULL; + bool link_is_hdcp14 = false; + + if (config->dpms_off) { + hdcp_remove_display(hdcp_work, link_index, aconnector); + return; + } + + memset(display, 0, sizeof(*display)); + memset(link, 0, sizeof(*link)); + + display->index = aconnector->base.index; + display->state = MOD_HDCP_DISPLAY_ACTIVE; + + if (aconnector->dc_sink) + sink = aconnector->dc_sink; + else if (aconnector->dc_em_sink) + sink = aconnector->dc_em_sink; + + if (sink) + link->mode = mod_hdcp_signal_type_to_operation_mode(sink->sink_signal); + + display->controller = CONTROLLER_ID_D0 + config->otg_inst; + display->dig_fe = config->dig_fe; + link->dig_be = config->dig_be; + link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1; + display->stream_enc_idx = config->stream_enc_idx; + link->link_enc_idx = config->link_enc_idx; + link->dio_output_id = config->dio_output_idx; + link->phy_idx = config->phy_idx; + + if (sink) + link_is_hdcp14 = dc_link_is_hdcp14(aconnector->dc_link, sink->sink_signal); + link->hdcp_supported_informational = link_is_hdcp14; + link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw; + link->dp.assr_enabled = config->assr_enabled; + link->dp.mst_enabled = config->mst_enabled; + link->dp.dp2_enabled = config->dp2_enabled; + link->dp.usb4_enabled = config->usb4_enabled; + display->adjust.disable = MOD_HDCP_DISPLAY_DISABLE_AUTHENTICATION; + link->adjust.auth_delay = 2; + link->adjust.hdcp1.disable = 0; + hdcp_w->encryption_status[display->index] = MOD_HDCP_ENCRYPTION_STATUS_HDCP_OFF; + + DRM_DEBUG_DRIVER("[HDCP_DM] display %d, CP %d, type %d\n", aconnector->base.index, + (!!aconnector->base.state) ? + aconnector->base.state->content_protection : -1, + (!!aconnector->base.state) ? 
+             aconnector->base.state->hdcp_content_type : -1);
+
+    mutex_lock(&hdcp_w->mutex);
+
+    mod_hdcp_add_display(&hdcp_w->hdcp, link, display, &hdcp_w->output);
+
+    process_output(hdcp_w);
+    mutex_unlock(&hdcp_w->mutex);
+}
+
+/**
+ * DOC: Add sysfs interface for set/get srm
+ *
+ * NOTE: From usermode's perspective you only need to call write *ONCE*; the
+ * kernel will automatically call it once or twice depending on the size.
+ *
+ * call "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode,
+ * no matter what the size is.
+ *
+ * The kernel can only pass PAGE_SIZE at once, and since
+ * MAX_SRM_FILE(5120) > PAGE_SIZE(4096), srm_data_write can be called
+ * multiple times.
+ *
+ * The sysfs interface doesn't tell us the total size we will get, so we send
+ * partial SRMs to psp and on the last call we send the full SRM. PSP will
+ * fail on every call before the last.
+ *
+ * This means we don't know if the SRM is good until the last call, and
+ * because of this limitation we cannot throw errors early, as that would
+ * stop the kernel from writing to sysfs.
+ *
+ * Example 1:
+ *	Good SRM size = 5096
+ *	first call to write 4096 -> PSP fails
+ *	second call to write 1000 -> PSP passes -> SRM is set
+ *
+ * Example 2:
+ *	Bad SRM size = 4096
+ *	first call to write 4096 -> PSP fails (this is the same as above, but
+ *	we don't know if this is the last call)
+ *
+ * Solution?:
+ *	1: Parse the SRM? -> It is signed, so we don't know the EOF
+ *	2: We can have another sysfs that passes the size before calling set.
+ *	   -> simpler solution below
+ *
+ * Easy solution:
+ * Always call get after set to verify whether set was successful.
+ * +----------------------+
+ * |    Why it works:     |
+ * +----------------------+
+ * PSP will only update its SRM if it is older than the one we are trying to
+ * load. Always do set first, then get.
+ *	-if we try to "1. SET" an older version, PSP will reject it and we
+ *	 can "2. GET" the newer version and save it
+ *
+ *	-if we try to "1. SET" a newer version, PSP will accept it and we can
+ *	 "2. GET" the same (newer) version back and save it
+ *
+ *	-if we try to "1. SET" a newer version and PSP rejects it, that means the format is
That means the format is + * incorrect/corrupted and we should correct our SRM by getting it from PSP + */ +static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buffer, + loff_t pos, size_t count) +{ + struct hdcp_workqueue *work; + u32 srm_version = 0; + + work = container_of(bin_attr, struct hdcp_workqueue, attr); + link_lock(work, true); + + memcpy(work->srm_temp + pos, buffer, count); + + if (!psp_set_srm(work->hdcp.config.psp.handle, work->srm_temp, pos + count, &srm_version)) { + DRM_DEBUG_DRIVER("HDCP SRM SET version 0x%X", srm_version); + memcpy(work->srm, work->srm_temp, pos + count); + work->srm_size = pos + count; + work->srm_version = srm_version; + } + + link_lock(work, false); + + return count; +} + +static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buffer, + loff_t pos, size_t count) +{ + struct hdcp_workqueue *work; + u8 *srm = NULL; + u32 srm_version; + u32 srm_size; + size_t ret = count; + + work = container_of(bin_attr, struct hdcp_workqueue, attr); + + link_lock(work, true); + + srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size); + + if (!srm) { + ret = -EINVAL; + goto ret; + } + + if (pos >= srm_size) + ret = 0; + + if (srm_size - pos < count) { + memcpy(buffer, srm + pos, srm_size - pos); + ret = srm_size - pos; + goto ret; + } + + memcpy(buffer, srm + pos, count); + +ret: + link_lock(work, false); + return ret; +} + +/* From the hdcp spec (5.Renewability) SRM needs to be stored in a non-volatile memory. + * + * For example, + * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B" + * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent + * across boot/reboots/suspend/resume/shutdown + * + * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP + * we need to make the SRM persistent. + * + * -PSP owns the checking of SRM but doesn't have the ability to store it in a non-volatile memory. + * -The kernel cannot write to the file systems. 
+ * -So we need usermode to do this for us, which is why an interface for usermode is needed + * + * + * + * Usermode can read/write to/from PSP using the sysfs interface + * For example: + * to save SRM from PSP to storage : cat /sys/class/drm/card0/device/hdcp_srm > srmfile + * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm + */ +static const struct bin_attribute data_attr = { + .attr = {.name = "hdcp_srm", .mode = 0664}, + .size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */ + .write = srm_data_write, + .read = srm_data_read, +}; + +struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, + struct cp_psp *cp_psp, struct dc *dc) +{ + int max_caps = dc->caps.max_links; + struct hdcp_workqueue *hdcp_work; + int i = 0; + + hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL); + if (ZERO_OR_NULL_PTR(hdcp_work)) + return NULL; + + hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, + sizeof(*hdcp_work->srm), GFP_KERNEL); + + if (!hdcp_work->srm) + goto fail_alloc_context; + + hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, + sizeof(*hdcp_work->srm_temp), GFP_KERNEL); + + if (!hdcp_work->srm_temp) + goto fail_alloc_context; + + hdcp_work->max_link = max_caps; + + for (i = 0; i < max_caps; i++) { + mutex_init(&hdcp_work[i].mutex); + + INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq); + INIT_WORK(&hdcp_work[i].property_update_work, event_property_update); + INIT_DELAYED_WORK(&hdcp_work[i].callback_dwork, event_callback); + INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer); + INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate); + + hdcp_work[i].hdcp.config.psp.handle = &adev->psp; + if (dc->ctx->dce_version == DCN_VERSION_3_1 || + dc->ctx->dce_version == DCN_VERSION_3_14 || + dc->ctx->dce_version == DCN_VERSION_3_15 || + dc->ctx->dce_version == DCN_VERSION_3_16) + hdcp_work[i].hdcp.config.psp.caps.dtm_v3_supported = 1; + hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i); + hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c; + hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c; + hdcp_work[i].hdcp.config.ddc.funcs.write_dpcd = lp_write_dpcd; + hdcp_work[i].hdcp.config.ddc.funcs.read_dpcd = lp_read_dpcd; + + memset(hdcp_work[i].aconnector, 0, + sizeof(struct amdgpu_dm_connector *) * + AMDGPU_DM_MAX_DISPLAY_INDEX); + memset(hdcp_work[i].encryption_status, 0, + sizeof(enum mod_hdcp_encryption_status) * + AMDGPU_DM_MAX_DISPLAY_INDEX); + } + + cp_psp->funcs.update_stream_config = update_config; + cp_psp->funcs.enable_assr = enable_assr; + cp_psp->handle = hdcp_work; + + /* File created at /sys/class/drm/card0/device/hdcp_srm*/ + hdcp_work[0].attr = data_attr; + sysfs_bin_attr_init(&hdcp_work[0].attr); + + if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr)) + DRM_WARN("Failed to create device file hdcp_srm"); + + return hdcp_work; + +fail_alloc_context: + kfree(hdcp_work->srm); + kfree(hdcp_work->srm_temp); + kfree(hdcp_work); + + return NULL; +} + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h new file mode 100644 index 0000000000..69b445b011 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h @@ -0,0 +1,89 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef AMDGPU_DM_AMDGPU_DM_HDCP_H_
+#define AMDGPU_DM_AMDGPU_DM_HDCP_H_
+
+#include "mod_hdcp.h"
+#include "hdcp.h"
+#include "dc.h"
+#include "dm_cp_psp.h"
+#include "amdgpu.h"
+
+struct mod_hdcp;
+struct mod_hdcp_link;
+struct mod_hdcp_display;
+struct cp_psp;
+
+struct hdcp_workqueue {
+    struct work_struct cpirq_work;
+    struct work_struct property_update_work;
+    struct delayed_work callback_dwork;
+    struct delayed_work watchdog_timer_dwork;
+    struct delayed_work property_validate_dwork;
+    struct amdgpu_dm_connector *aconnector[AMDGPU_DM_MAX_DISPLAY_INDEX];
+    struct mutex mutex;
+
+    struct mod_hdcp hdcp;
+    struct mod_hdcp_output output;
+    struct mod_hdcp_display display;
+    struct mod_hdcp_link link;
+
+    enum mod_hdcp_encryption_status encryption_status[AMDGPU_DM_MAX_DISPLAY_INDEX];
+    /* when a display is unplugged from an mst hub, the connector will be
+     * destroyed within dm_dp_mst_connector_destroy. The connector's hdcp
+     * properties, like type, undesired, desired, enabled, will be lost.
+     * So, save the hdcp properties into hdcp_work within
+     * amdgpu_dm_atomic_commit_tail.
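+     * The saved copy is what makes a later re-plug seamless: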
if the same display is + * plugged back with same display index, its hdcp properties + * will be retrieved from hdcp_work within dm_dp_mst_get_modes + */ + /* un-desired, desired, enabled */ + unsigned int content_protection[AMDGPU_DM_MAX_DISPLAY_INDEX]; + /* hdcp1.x, hdcp2.x */ + unsigned int hdcp_content_type[AMDGPU_DM_MAX_DISPLAY_INDEX]; + + uint8_t max_link; + + uint8_t *srm; + uint8_t *srm_temp; + uint32_t srm_version; + uint32_t srm_size; + struct bin_attribute attr; +}; + +void hdcp_update_display(struct hdcp_workqueue *hdcp_work, + unsigned int link_index, + struct amdgpu_dm_connector *aconnector, + uint8_t content_type, + bool enable_encryption); + +void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index); +void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index); +void hdcp_destroy(struct kobject *kobj, struct hdcp_workqueue *work); + +struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc); + +#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c new file mode 100644 index 0000000000..cf0834ae53 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -0,0 +1,1279 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/acpi.h>
+#include <linux/i2c.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_probe_helper.h>
+#include <drm/amdgpu_drm.h>
+#include <drm/drm_edid.h>
+
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "dc.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+#include "amdgpu_dm_mst_types.h"
+#include "dpcd_defs.h"
+#include "dc/inc/core_types.h"
+
+#include "dm_helpers.h"
+#include "ddc_service_types.h"
+
+static u32 edid_extract_panel_id(struct edid *edid)
+{
+    return (u32)edid->mfg_id[0] << 24 |
+           (u32)edid->mfg_id[1] << 16 |
+           (u32)EDID_PRODUCT_ID(edid);
+}
+
+static void apply_edid_quirks(struct edid *edid, struct dc_edid_caps *edid_caps)
+{
+    uint32_t panel_id = edid_extract_panel_id(edid);
+
+    switch (panel_id) {
+    /* Workaround for some monitors that do not work well with FAMS */
+    case drm_edid_encode_panel_id('S', 'A', 'M', 0x0E5E):
+    case drm_edid_encode_panel_id('S', 'A', 'M', 0x7053):
+    case drm_edid_encode_panel_id('S', 'A', 'M', 0x71AC):
+        DRM_DEBUG_DRIVER("Disabling FAMS on monitor with panel id %X\n", panel_id);
+        edid_caps->panel_patch.disable_fams = true;
+        break;
+    /* Workaround for some monitors that do not clear DPCD 0x317 if FreeSync is unsupported */
+    case drm_edid_encode_panel_id('A', 'U', 'O', 0xA7AB):
+    case drm_edid_encode_panel_id('A', 'U', 'O', 0xE69B):
+        DRM_DEBUG_DRIVER("Clearing DPCD 0x317 on monitor with panel id %X\n", panel_id);
+        edid_caps->panel_patch.remove_sink_ext_caps = true;
+        break;
+    default:
+        return;
+    }
+}
+
+/**
+ * dm_helpers_parse_edid_caps() - Parse edid caps
+ *
+ * @link: current detected link
+ * @edid: [in] pointer to edid
+ * @edid_caps: [out] pointer to edid caps to be filled in
+ *
+ * Return: EDID parse status
+ */
+enum dc_edid_status dm_helpers_parse_edid_caps(
+        struct dc_link *link,
+        const struct dc_edid *edid,
+        struct dc_edid_caps *edid_caps)
+{
+    struct amdgpu_dm_connector *aconnector = link->priv;
+    struct drm_connector *connector = &aconnector->base;
+    struct edid *edid_buf = edid ?
(struct edid *) edid->raw_edid : NULL;
+    struct cea_sad *sads;
+    int sad_count = -1;
+    int sadb_count = -1;
+    int i = 0;
+    uint8_t *sadb = NULL;
+
+    enum dc_edid_status result = EDID_OK;
+
+    if (!edid_caps || !edid)
+        return EDID_BAD_INPUT;
+
+    if (!drm_edid_is_valid(edid_buf))
+        result = EDID_BAD_CHECKSUM;
+
+    edid_caps->manufacturer_id = (uint16_t) edid_buf->mfg_id[0] |
+            ((uint16_t) edid_buf->mfg_id[1])<<8;
+    edid_caps->product_id = (uint16_t) edid_buf->prod_code[0] |
+            ((uint16_t) edid_buf->prod_code[1])<<8;
+    edid_caps->serial_number = edid_buf->serial;
+    edid_caps->manufacture_week = edid_buf->mfg_week;
+    edid_caps->manufacture_year = edid_buf->mfg_year;
+
+    drm_edid_get_monitor_name(edid_buf,
+                              edid_caps->display_name,
+                              AUDIO_INFO_DISPLAY_NAME_SIZE_IN_CHARS);
+
+    edid_caps->edid_hdmi = connector->display_info.is_hdmi;
+
+    sad_count = drm_edid_to_sad((struct edid *) edid->raw_edid, &sads);
+    if (sad_count <= 0)
+        return result;
+
+    edid_caps->audio_mode_count = min(sad_count, DC_MAX_AUDIO_DESC_COUNT);
+    for (i = 0; i < edid_caps->audio_mode_count; ++i) {
+        struct cea_sad *sad = &sads[i];
+
+        edid_caps->audio_modes[i].format_code = sad->format;
+        edid_caps->audio_modes[i].channel_count = sad->channels + 1;
+        edid_caps->audio_modes[i].sample_rate = sad->freq;
+        edid_caps->audio_modes[i].sample_size = sad->byte2;
+    }
+
+    sadb_count = drm_edid_to_speaker_allocation((struct edid *) edid->raw_edid, &sadb);
+
+    if (sadb_count < 0) {
+        DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sadb_count);
+        sadb_count = 0;
+    }
+
+    if (sadb_count)
+        edid_caps->speaker_flags = sadb[0];
+    else
+        edid_caps->speaker_flags = DEFAULT_SPEAKER_LOCATION;
+
+    apply_edid_quirks(edid_buf, edid_caps);
+
+    kfree(sads);
+    kfree(sadb);
+
+    return result;
+}
+
+static void
+fill_dc_mst_payload_table_from_drm(struct dc_link *link,
+                                   bool enable,
+                                   struct drm_dp_mst_atomic_payload *target_payload,
+                                   struct dc_dp_mst_stream_allocation_table *table)
+{
+    struct dc_dp_mst_stream_allocation_table new_table = { 0 };
+    struct dc_dp_mst_stream_allocation *sa;
+    struct link_mst_stream_allocation_table copy_of_link_table =
+            link->mst_stream_alloc_table;
+
+    int i;
+    int current_hw_table_stream_cnt = copy_of_link_table.stream_count;
+    struct link_mst_stream_allocation *dc_alloc;
+
+    /* TODO: refactor to set link->mst_stream_alloc_table directly if possible. */
+    if (enable) {
+        dc_alloc =
+        &copy_of_link_table.stream_allocations[current_hw_table_stream_cnt];
+        dc_alloc->vcp_id = target_payload->vcpi;
+        dc_alloc->slot_count = target_payload->time_slots;
+    } else {
+        for (i = 0; i < copy_of_link_table.stream_count; i++) {
+            dc_alloc =
+            &copy_of_link_table.stream_allocations[i];
+
+            if (dc_alloc->vcp_id == target_payload->vcpi) {
+                dc_alloc->vcp_id = 0;
+                dc_alloc->slot_count = 0;
+                break;
+            }
+        }
+        ASSERT(i != copy_of_link_table.stream_count);
+    }
+
+    /* Fill payload info */
+    for (i = 0; i < MAX_CONTROLLER_NUM; i++) {
+        dc_alloc =
+        &copy_of_link_table.stream_allocations[i];
+        if (dc_alloc->vcp_id > 0 && dc_alloc->slot_count > 0) {
+            sa = &new_table.stream_allocations[new_table.stream_count];
+            sa->slot_count = dc_alloc->slot_count;
+            sa->vcp_id = dc_alloc->vcp_id;
+            new_table.stream_count++;
+        }
+    }
+
+    /* Overwrite the old table */
+    *table = new_table;
+}
+
+void dm_helpers_dp_update_branch_info(
+    struct dc_context *ctx,
+    const struct dc_link *link)
+{}
+
+static void dm_helpers_construct_old_payload(
+            struct dc_link *link,
+            int pbn_per_slot,
+            struct drm_dp_mst_atomic_payload *new_payload,
+            struct
drm_dp_mst_atomic_payload *old_payload)
+{
+    struct link_mst_stream_allocation_table current_link_table =
+            link->mst_stream_alloc_table;
+    struct link_mst_stream_allocation *dc_alloc;
+    int i;
+
+    *old_payload = *new_payload;
+
+    /* Set correct time_slots/PBN of old payload.
+     * Other fields (delete & dsc_enabled) in
+     * struct drm_dp_mst_atomic_payload are don't-care fields
+     * while calling drm_dp_remove_payload()
+     */
+    for (i = 0; i < current_link_table.stream_count; i++) {
+        dc_alloc =
+        &current_link_table.stream_allocations[i];
+
+        if (dc_alloc->vcp_id == new_payload->vcpi) {
+            old_payload->time_slots = dc_alloc->slot_count;
+            old_payload->pbn = dc_alloc->slot_count * pbn_per_slot;
+            break;
+        }
+    }
+
+    /* make sure there is an old payload */
+    ASSERT(i != current_link_table.stream_count);
+}
+
+/*
+ * Writes payload allocation table in immediate downstream device.
+ */
+bool dm_helpers_dp_mst_write_payload_allocation_table(
+        struct dc_context *ctx,
+        const struct dc_stream_state *stream,
+        struct dc_dp_mst_stream_allocation_table *proposed_table,
+        bool enable)
+{
+    struct amdgpu_dm_connector *aconnector;
+    struct drm_dp_mst_topology_state *mst_state;
+    struct drm_dp_mst_atomic_payload *target_payload, *new_payload, old_payload;
+    struct drm_dp_mst_topology_mgr *mst_mgr;
+
+    aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
+    /* Accessing the connector state is required for vcpi_slots allocation
+     * and directly relies on behaviour in commit check
+     * that blocks before commit, guaranteeing that the state
+     * is not going to be swapped while still in use in commit tail
+     */
+
+    if (!aconnector || !aconnector->mst_root)
+        return false;
+
+    mst_mgr = &aconnector->mst_root->mst_mgr;
+    mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state);
+
+    /* It's OK for this to fail */
+    new_payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port);
+
+    if (enable) {
+        target_payload = new_payload;
+
+        drm_dp_add_payload_part1(mst_mgr, mst_state, new_payload);
+    } else {
+        /* construct old payload by VCPI */
+        dm_helpers_construct_old_payload(stream->link, mst_state->pbn_div,
+                                         new_payload, &old_payload);
+        target_payload = &old_payload;
+
+        drm_dp_remove_payload(mst_mgr, mst_state, &old_payload, new_payload);
+    }
+
+    /* mst_mgr->payloads are the VC payloads that notify the MST branch
+     * device using DPCD or AUX messages. Slots 1-63 are allocated in
+     * sequence for each stream, and the AMD ASIC stream slot allocation
+     * should follow the same sequence. Copy the DRM MST allocation to dc.
+     */
+    fill_dc_mst_payload_table_from_drm(stream->link, enable, target_payload, proposed_table);
+
+    return true;
+}
+
+/*
+ * poll pending down reply
+ */
+void dm_helpers_dp_mst_poll_pending_down_reply(
+    struct dc_context *ctx,
+    const struct dc_link *link)
+{}
+
+/*
+ * Clear payload allocation table before enabling the MST DP link.
+ */
+void dm_helpers_dp_mst_clear_payload_allocation_table(
+    struct dc_context *ctx,
+    const struct dc_link *link)
+{}
+
+/*
+ * Polls until ACT (allocation change trigger) is handled and sends the
+ * ALLOCATE_PAYLOAD message.
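+ *
+ * DRM's drm_dp_check_act_status() does the actual DPCD polling; this helper
+ * only maps the result onto DC's ACT status codes. A hypothetical caller
+ * (illustrative only, not from this file) would look like:
+ *
+ *	if (dm_helpers_dp_mst_poll_for_allocation_change_trigger(ctx, stream)
+ *			!= ACT_SUCCESS)
+ *		DC_LOG_WARNING("ACT not handled by branch device\n");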
+ */ +enum act_return_status dm_helpers_dp_mst_poll_for_allocation_change_trigger( + struct dc_context *ctx, + const struct dc_stream_state *stream) +{ + struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_topology_mgr *mst_mgr; + int ret; + + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + + if (!aconnector || !aconnector->mst_root) + return ACT_FAILED; + + mst_mgr = &aconnector->mst_root->mst_mgr; + + if (!mst_mgr->mst_state) + return ACT_FAILED; + + ret = drm_dp_check_act_status(mst_mgr); + + if (ret) + return ACT_FAILED; + + return ACT_SUCCESS; +} + +bool dm_helpers_dp_mst_send_payload_allocation( + struct dc_context *ctx, + const struct dc_stream_state *stream, + bool enable) +{ + struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_topology_state *mst_state; + struct drm_dp_mst_topology_mgr *mst_mgr; + struct drm_dp_mst_atomic_payload *payload; + enum mst_progress_status set_flag = MST_ALLOCATE_NEW_PAYLOAD; + enum mst_progress_status clr_flag = MST_CLEAR_ALLOCATED_PAYLOAD; + int ret = 0; + + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + + if (!aconnector || !aconnector->mst_root) + return false; + + mst_mgr = &aconnector->mst_root->mst_mgr; + mst_state = to_drm_dp_mst_topology_state(mst_mgr->base.state); + + payload = drm_atomic_get_mst_payload_state(mst_state, aconnector->mst_output_port); + + if (!enable) { + set_flag = MST_CLEAR_ALLOCATED_PAYLOAD; + clr_flag = MST_ALLOCATE_NEW_PAYLOAD; + } + + if (enable) + ret = drm_dp_add_payload_part2(mst_mgr, mst_state->base.state, payload); + + if (ret) { + amdgpu_dm_set_mst_status(&aconnector->mst_status, + set_flag, false); + } else { + amdgpu_dm_set_mst_status(&aconnector->mst_status, + set_flag, true); + amdgpu_dm_set_mst_status(&aconnector->mst_status, + clr_flag, false); + } + + return true; +} + +void dm_dtn_log_begin(struct dc_context *ctx, + struct dc_log_buffer_ctx *log_ctx) +{ + static const char msg[] = "[dtn begin]\n"; + + if (!log_ctx) { + pr_info("%s", msg); + return; + } + + dm_dtn_log_append_v(ctx, log_ctx, "%s", msg); +} + +__printf(3, 4) +void dm_dtn_log_append_v(struct dc_context *ctx, + struct dc_log_buffer_ctx *log_ctx, + const char *msg, ...) +{ + va_list args; + size_t total; + int n; + + if (!log_ctx) { + /* No context, redirect to dmesg. */ + struct va_format vaf; + + vaf.fmt = msg; + vaf.va = &args; + + va_start(args, msg); + pr_info("%pV", &vaf); + va_end(args); + + return; + } + + /* Measure the output. */ + va_start(args, msg); + n = vsnprintf(NULL, 0, msg, args); + va_end(args); + + if (n <= 0) + return; + + /* Reallocate the string buffer as needed. */ + total = log_ctx->pos + n + 1; + + if (total > log_ctx->size) { + char *buf = kvcalloc(total, sizeof(char), GFP_KERNEL); + + if (buf) { + memcpy(buf, log_ctx->buf, log_ctx->pos); + kfree(log_ctx->buf); + + log_ctx->buf = buf; + log_ctx->size = total; + } + } + + if (!log_ctx->buf) + return; + + /* Write the formatted string to the log buffer. 
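+     * vscnprintf() is bounded by the space actually left, so if the
+     * reallocation above failed the output is truncated instead of
+     * overflowing the buffer.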
 */
+    va_start(args, msg);
+    n = vscnprintf(
+        log_ctx->buf + log_ctx->pos,
+        log_ctx->size - log_ctx->pos,
+        msg,
+        args);
+    va_end(args);
+
+    if (n > 0)
+        log_ctx->pos += n;
+}
+
+void dm_dtn_log_end(struct dc_context *ctx,
+                    struct dc_log_buffer_ctx *log_ctx)
+{
+    static const char msg[] = "[dtn end]\n";
+
+    if (!log_ctx) {
+        pr_info("%s", msg);
+        return;
+    }
+
+    dm_dtn_log_append_v(ctx, log_ctx, "%s", msg);
+}
+
+bool dm_helpers_dp_mst_start_top_mgr(
+        struct dc_context *ctx,
+        const struct dc_link *link,
+        bool boot)
+{
+    struct amdgpu_dm_connector *aconnector = link->priv;
+    int ret;
+
+    if (!aconnector) {
+        DRM_ERROR("Failed to find connector for link!");
+        return false;
+    }
+
+    if (boot) {
+        DRM_INFO("DM_MST: Deferring MST start on aconnector: %p [id: %d]\n",
+                 aconnector, aconnector->base.base.id);
+        return true;
+    }
+
+    DRM_INFO("DM_MST: starting TM on aconnector: %p [id: %d]\n",
+             aconnector, aconnector->base.base.id);
+
+    ret = drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true);
+    if (ret < 0) {
+        DRM_ERROR("DM_MST: Failed to set the device into MST mode!");
+        return false;
+    }
+
+    DRM_INFO("DM_MST: DP%x, %d-lane link detected\n", aconnector->mst_mgr.dpcd[0],
+             aconnector->mst_mgr.dpcd[2] & DP_MAX_LANE_COUNT_MASK);
+
+    return true;
+}
+
+bool dm_helpers_dp_mst_stop_top_mgr(
+        struct dc_context *ctx,
+        struct dc_link *link)
+{
+    struct amdgpu_dm_connector *aconnector = link->priv;
+
+    if (!aconnector) {
+        DRM_ERROR("Failed to find connector for link!");
+        return false;
+    }
+
+    DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
+             aconnector, aconnector->base.base.id);
+
+    if (aconnector->mst_mgr.mst_state) {
+        drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false);
+        link->cur_link_settings.lane_count = 0;
+    }
+
+    return false;
+}
+
+bool dm_helpers_dp_read_dpcd(
+    struct dc_context *ctx,
+    const struct dc_link *link,
+    uint32_t address,
+    uint8_t *data,
+    uint32_t size)
+{
+
+    struct amdgpu_dm_connector *aconnector = link->priv;
+
+    if (!aconnector) {
+        DC_LOG_DC("Failed to find connector for link!\n");
+        return false;
+    }
+
+    return drm_dp_dpcd_read(&aconnector->dm_dp_aux.aux, address, data,
+                            size) == size;
+}
+
+bool dm_helpers_dp_write_dpcd(
+    struct dc_context *ctx,
+    const struct dc_link *link,
+    uint32_t address,
+    const uint8_t *data,
+    uint32_t size)
+{
+    struct amdgpu_dm_connector *aconnector = link->priv;
+
+    if (!aconnector) {
+        DRM_ERROR("Failed to find connector for link!");
+        return false;
+    }
+
+    return drm_dp_dpcd_write(&aconnector->dm_dp_aux.aux,
+                             address, (uint8_t *)data, size) > 0;
+}
+
+bool dm_helpers_submit_i2c(
+    struct dc_context *ctx,
+    const struct dc_link *link,
+    struct i2c_command *cmd)
+{
+    struct amdgpu_dm_connector *aconnector = link->priv;
+    struct i2c_msg *msgs;
+    int i = 0;
+    int num = cmd->number_of_payloads;
+    bool result;
+
+    if (!aconnector) {
+        DRM_ERROR("Failed to find connector for link!");
+        return false;
+    }
+
+    msgs = kcalloc(num, sizeof(struct i2c_msg), GFP_KERNEL);
+
+    if (!msgs)
+        return false;
+
+    for (i = 0; i < num; i++) {
+        msgs[i].flags = cmd->payloads[i].write ?
0 : I2C_M_RD; + msgs[i].addr = cmd->payloads[i].address; + msgs[i].len = cmd->payloads[i].length; + msgs[i].buf = cmd->payloads[i].data; + } + + result = i2c_transfer(&aconnector->i2c->base, msgs, num) == num; + + kfree(msgs); + + return result; +} + +static bool execute_synaptics_rc_command(struct drm_dp_aux *aux, + bool is_write_cmd, + unsigned char cmd, + unsigned int length, + unsigned int offset, + unsigned char *data) +{ + bool success = false; + unsigned char rc_data[16] = {0}; + unsigned char rc_offset[4] = {0}; + unsigned char rc_length[2] = {0}; + unsigned char rc_cmd = 0; + unsigned char rc_result = 0xFF; + unsigned char i = 0; + int ret; + + if (is_write_cmd) { + // write rc data + memmove(rc_data, data, length); + ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data)); + } + + // write rc offset + rc_offset[0] = (unsigned char) offset & 0xFF; + rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF; + rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF; + rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF; + ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset)); + + // write rc length + rc_length[0] = (unsigned char) length & 0xFF; + rc_length[1] = (unsigned char) (length >> 8) & 0xFF; + ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length)); + + // write rc cmd + rc_cmd = cmd | 0x80; + ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); + + if (ret < 0) { + DRM_ERROR("%s: write cmd ..., err = %d\n", __func__, ret); + return false; + } + + // poll until active is 0 + for (i = 0; i < 10; i++) { + drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); + if (rc_cmd == cmd) + // active is 0 + break; + msleep(10); + } + + // read rc result + drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result)); + success = (rc_result == 0); + + if (success && !is_write_cmd) { + // read rc data + drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length); + } + + DC_LOG_DC("%s: success = %d\n", __func__, success); + + return success; +} + +static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) +{ + unsigned char data[16] = {0}; + + DC_LOG_DC("Start %s\n", __func__); + + // Step 2 + data[0] = 'P'; + data[1] = 'R'; + data[2] = 'I'; + data[3] = 'U'; + data[4] = 'S'; + + if (!execute_synaptics_rc_command(aux, true, 0x01, 5, 0, data)) + return; + + // Step 3 and 4 + if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data)) + return; + + data[0] &= (~(1 << 1)); // set bit 1 to 0 + if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data)) + return; + + if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data)) + return; + + data[0] &= (~(1 << 1)); // set bit 1 to 0 + if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220D98, data)) + return; + + if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data)) + return; + + data[0] &= (~(1 << 1)); // set bit 1 to 0 + if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data)) + return; + + // Step 3 and 5 + if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220998, data)) + return; + + data[0] |= (1 << 1); // set bit 1 to 1 + if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x220998, data)) + return; + + if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x220D98, data)) + return; + + data[0] |= (1 << 1); // set bit 1 to 1 + + if (!execute_synaptics_rc_command(aux, false, 0x31, 4, 0x221198, data)) + return; + + data[0] |= (1 << 1); // set bit 1 
to 1 + if (!execute_synaptics_rc_command(aux, true, 0x21, 4, 0x221198, data)) + return; + + // Step 6 + if (!execute_synaptics_rc_command(aux, true, 0x02, 0, 0, NULL)) + return; + + DC_LOG_DC("Done %s\n", __func__); +} + +/* MST Dock */ +static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA"; + +static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( + struct drm_dp_aux *aux, + const struct dc_stream_state *stream, + bool enable) +{ + uint8_t ret = 0; + + DC_LOG_DC("Configure DSC to non-virtual dpcd synaptics\n"); + + if (enable) { + /* When DSC is enabled on previous boot and reboot with the hub, + * there is a chance that Synaptics hub gets stuck during reboot sequence. + * Applying a workaround to reset Synaptics SDP fifo before enabling the first stream + */ + if (!stream->link->link_status.link_active && + memcmp(stream->link->dpcd_caps.branch_dev_name, + (int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0) + apply_synaptics_fifo_reset_wa(aux); + + ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); + DRM_INFO("Send DSC enable to synaptics\n"); + + } else { + /* Synaptics hub not support virtual dpcd, + * external monitor occur garbage while disable DSC, + * Disable DSC only when entire link status turn to false, + */ + if (!stream->link->link_status.link_active) { + ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); + DRM_INFO("Send DSC disable to synaptics\n"); + } + } + + return ret; +} + +bool dm_helpers_dp_write_dsc_enable( + struct dc_context *ctx, + const struct dc_stream_state *stream, + bool enable) +{ + static const uint8_t DSC_DISABLE; + static const uint8_t DSC_DECODING = 0x01; + static const uint8_t DSC_PASSTHROUGH = 0x02; + + struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_port *port; + uint8_t enable_dsc = enable ? DSC_DECODING : DSC_DISABLE; + uint8_t enable_passthrough = enable ? DSC_PASSTHROUGH : DSC_DISABLE; + uint8_t ret = 0; + + if (!stream) + return false; + + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) { + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + + if (!aconnector->dsc_aux) + return false; + + // apply w/a to synaptics + if (needs_dsc_aux_workaround(aconnector->dc_link) && + (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3) + return write_dsc_enable_synaptics_non_virtual_dpcd_mst( + aconnector->dsc_aux, stream, enable_dsc); + + port = aconnector->mst_output_port; + + if (enable) { + if (port->passthrough_aux) { + ret = drm_dp_dpcd_write(port->passthrough_aux, + DP_DSC_ENABLE, + &enable_passthrough, 1); + DC_LOG_DC("Sent DSC pass-through enable to virtual dpcd port, ret = %u\n", + ret); + } + + ret = drm_dp_dpcd_write(aconnector->dsc_aux, + DP_DSC_ENABLE, &enable_dsc, 1); + DC_LOG_DC("Sent DSC decoding enable to %s port, ret = %u\n", + (port->passthrough_aux) ? "remote RX" : + "virtual dpcd", + ret); + } else { + ret = drm_dp_dpcd_write(aconnector->dsc_aux, + DP_DSC_ENABLE, &enable_dsc, 1); + DC_LOG_DC("Sent DSC decoding disable to %s port, ret = %u\n", + (port->passthrough_aux) ? 
"remote RX" : + "virtual dpcd", + ret); + + if (port->passthrough_aux) { + ret = drm_dp_dpcd_write(port->passthrough_aux, + DP_DSC_ENABLE, + &enable_passthrough, 1); + DC_LOG_DC("Sent DSC pass-through disable to virtual dpcd port, ret = %u\n", + ret); + } + } + } + + if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) { + if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_NONE) { + ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); + DC_LOG_DC("Send DSC %s to SST RX\n", enable_dsc ? "enable" : "disable"); + } else if (stream->sink->link->dpcd_caps.dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER) { + ret = dm_helpers_dp_write_dpcd(ctx, stream->link, DP_DSC_ENABLE, &enable_dsc, 1); + DC_LOG_DC("Send DSC %s to DP-HDMI PCON\n", enable_dsc ? "enable" : "disable"); + } + } + + return ret; +} + +bool dm_helpers_is_dp_sink_present(struct dc_link *link) +{ + bool dp_sink_present; + struct amdgpu_dm_connector *aconnector = link->priv; + + if (!aconnector) { + BUG_ON("Failed to find connector for link!"); + return true; + } + + mutex_lock(&aconnector->dm_dp_aux.aux.hw_mutex); + dp_sink_present = dc_link_is_dp_sink_present(link); + mutex_unlock(&aconnector->dm_dp_aux.aux.hw_mutex); + return dp_sink_present; +} + +enum dc_edid_status dm_helpers_read_local_edid( + struct dc_context *ctx, + struct dc_link *link, + struct dc_sink *sink) +{ + struct amdgpu_dm_connector *aconnector = link->priv; + struct drm_connector *connector = &aconnector->base; + struct i2c_adapter *ddc; + int retry = 3; + enum dc_edid_status edid_status; + struct edid *edid; + + if (link->aux_mode) + ddc = &aconnector->dm_dp_aux.aux.ddc; + else + ddc = &aconnector->i2c->base; + + /* some dongles read edid incorrectly the first time, + * do check sum and retry to make sure read correct edid. 
+ */ + do { + + edid = drm_get_edid(&aconnector->base, ddc); + + /* DP Compliance Test 4.2.2.6 */ + if (link->aux_mode && connector->edid_corrupt) + drm_dp_send_real_edid_checksum(&aconnector->dm_dp_aux.aux, connector->real_edid_checksum); + + if (!edid && connector->edid_corrupt) { + connector->edid_corrupt = false; + return EDID_BAD_CHECKSUM; + } + + if (!edid) + return EDID_NO_RESPONSE; + + sink->dc_edid.length = EDID_LENGTH * (edid->extensions + 1); + memmove(sink->dc_edid.raw_edid, (uint8_t *)edid, sink->dc_edid.length); + + /* We don't need the original edid anymore */ + kfree(edid); + + edid_status = dm_helpers_parse_edid_caps( + link, + &sink->dc_edid, + &sink->edid_caps); + + } while (edid_status == EDID_BAD_CHECKSUM && --retry > 0); + + if (edid_status != EDID_OK) + DRM_ERROR("EDID err: %d, on connector: %s", + edid_status, + aconnector->base.name); + if (link->aux_mode) { + union test_request test_request = {0}; + union test_response test_response = {0}; + + dm_helpers_dp_read_dpcd(ctx, + link, + DP_TEST_REQUEST, + &test_request.raw, + sizeof(union test_request)); + + if (!test_request.bits.EDID_READ) + return edid_status; + + test_response.bits.EDID_CHECKSUM_WRITE = 1; + + dm_helpers_dp_write_dpcd(ctx, + link, + DP_TEST_EDID_CHECKSUM, + &sink->dc_edid.raw_edid[sink->dc_edid.length-1], + 1); + + dm_helpers_dp_write_dpcd(ctx, + link, + DP_TEST_RESPONSE, + &test_response.raw, + sizeof(test_response)); + + } + + return edid_status; +} +int dm_helper_dmub_aux_transfer_sync( + struct dc_context *ctx, + const struct dc_link *link, + struct aux_payload *payload, + enum aux_return_code_type *operation_result) +{ + if (!link->hpd_status) { + *operation_result = AUX_RET_ERROR_HPD_DISCON; + return -1; + } + + return amdgpu_dm_process_dmub_aux_transfer_sync(ctx, link->link_index, payload, + operation_result); +} + +int dm_helpers_dmub_set_config_sync(struct dc_context *ctx, + const struct dc_link *link, + struct set_config_cmd_payload *payload, + enum set_config_status *operation_result) +{ + return amdgpu_dm_process_dmub_set_config_sync(ctx, link->link_index, payload, + operation_result); +} + +void dm_set_dcn_clocks(struct dc_context *ctx, struct dc_clocks *clks) +{ + /* TODO: something */ +} + +void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us) +{ + // TODO: + //amdgpu_device_gpu_recover(dc_context->driver-context, NULL); +} + +void dm_helpers_init_panel_settings( + struct dc_context *ctx, + struct dc_panel_config *panel_config, + struct dc_sink *sink) +{ + // Extra Panel Power Sequence + panel_config->pps.extra_t3_ms = sink->edid_caps.panel_patch.extra_t3_ms; + panel_config->pps.extra_t7_ms = sink->edid_caps.panel_patch.extra_t7_ms; + panel_config->pps.extra_delay_backlight_off = sink->edid_caps.panel_patch.extra_delay_backlight_off; + panel_config->pps.extra_post_t7_ms = 0; + panel_config->pps.extra_pre_t11_ms = 0; + panel_config->pps.extra_t12_ms = sink->edid_caps.panel_patch.extra_t12_ms; + panel_config->pps.extra_post_OUI_ms = 0; + // Feature DSC + panel_config->dsc.disable_dsc_edp = false; + panel_config->dsc.force_dsc_edp_policy = 0; +} + +void dm_helpers_override_panel_settings( + struct dc_context *ctx, + struct dc_panel_config *panel_config) +{ + // Feature DSC + if (amdgpu_dc_debug_mask & DC_DISABLE_DSC) + panel_config->dsc.disable_dsc_edp = true; +} + +void *dm_helpers_allocate_gpu_mem( + struct dc_context *ctx, + enum dc_gpu_mem_alloc_type type, + size_t size, + long long *addr) +{ + struct amdgpu_device 
*adev = ctx->driver_context; + struct dal_allocation *da; + u32 domain = (type == DC_MEM_ALLOC_TYPE_GART) ? + AMDGPU_GEM_DOMAIN_GTT : AMDGPU_GEM_DOMAIN_VRAM; + int ret; + + da = kzalloc(sizeof(struct dal_allocation), GFP_KERNEL); + if (!da) + return NULL; + + ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, + domain, &da->bo, + &da->gpu_addr, &da->cpu_ptr); + + *addr = da->gpu_addr; + + if (ret) { + kfree(da); + return NULL; + } + + /* add da to list in dm */ + list_add(&da->list, &adev->dm.da_list); + + return da->cpu_ptr; +} + +void dm_helpers_free_gpu_mem( + struct dc_context *ctx, + enum dc_gpu_mem_alloc_type type, + void *pvMem) +{ + struct amdgpu_device *adev = ctx->driver_context; + struct dal_allocation *da; + + /* walk the da list in DM */ + list_for_each_entry(da, &adev->dm.da_list, list) { + if (pvMem == da->cpu_ptr) { + amdgpu_bo_free_kernel(&da->bo, &da->gpu_addr, &da->cpu_ptr); + list_del(&da->list); + kfree(da); + break; + } + } +} + +bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enable) +{ + enum dc_irq_source irq_source; + bool ret; + + irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX; + + ret = dc_interrupt_set(ctx->dc, irq_source, enable); + + DRM_DEBUG_DRIVER("Dmub trace irq %sabling: r=%d\n", + enable ? "en" : "dis", ret); + return ret; +} + +void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream) +{ + /* TODO: virtual DPCD */ + struct dc_link *link = stream->link; + union down_spread_ctrl old_downspread; + union down_spread_ctrl new_downspread; + + if (link->aux_access_disabled) + return; + + if (!dm_helpers_dp_read_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL, + &old_downspread.raw, + sizeof(old_downspread))) + return; + + new_downspread.raw = old_downspread.raw; + new_downspread.bits.IGNORE_MSA_TIMING_PARAM = + (stream->ignore_msa_timing_param) ? 1 : 0; + + if (new_downspread.raw != old_downspread.raw) + dm_helpers_dp_write_dpcd(link->ctx, link, DP_DOWNSPREAD_CTRL, + &new_downspread.raw, + sizeof(new_downspread)); +} + +bool dm_helpers_dp_handle_test_pattern_request( + struct dc_context *ctx, + const struct dc_link *link, + union link_test_pattern dpcd_test_pattern, + union test_misc dpcd_test_params) +{ + enum dp_test_pattern test_pattern; + enum dp_test_pattern_color_space test_pattern_color_space = + DP_TEST_PATTERN_COLOR_SPACE_UNDEFINED; + enum dc_color_depth requestColorDepth = COLOR_DEPTH_UNDEFINED; + enum dc_pixel_encoding requestPixelEncoding = PIXEL_ENCODING_UNDEFINED; + struct pipe_ctx *pipes = link->dc->current_state->res_ctx.pipe_ctx; + struct pipe_ctx *pipe_ctx = NULL; + struct amdgpu_dm_connector *aconnector = link->priv; + int i; + + for (i = 0; i < MAX_PIPES; i++) { + if (pipes[i].stream == NULL) + continue; + + if (pipes[i].stream->link == link && !pipes[i].top_pipe && + !pipes[i].prev_odm_pipe) { + pipe_ctx = &pipes[i]; + break; + } + } + + if (pipe_ctx == NULL) + return false; + + switch (dpcd_test_pattern.bits.PATTERN) { + case LINK_TEST_PATTERN_COLOR_RAMP: + test_pattern = DP_TEST_PATTERN_COLOR_RAMP; + break; + case LINK_TEST_PATTERN_VERTICAL_BARS: + test_pattern = DP_TEST_PATTERN_VERTICAL_BARS; + break; /* black and white */ + case LINK_TEST_PATTERN_COLOR_SQUARES: + test_pattern = (dpcd_test_params.bits.DYN_RANGE == + TEST_DYN_RANGE_VESA ? 
+ DP_TEST_PATTERN_COLOR_SQUARES : + DP_TEST_PATTERN_COLOR_SQUARES_CEA); + break; + default: + test_pattern = DP_TEST_PATTERN_VIDEO_MODE; + break; + } + + if (dpcd_test_params.bits.CLR_FORMAT == 0) + test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB; + else + test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ? + DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 : + DP_TEST_PATTERN_COLOR_SPACE_YCBCR601; + + switch (dpcd_test_params.bits.BPC) { + case 0: // 6 bits + requestColorDepth = COLOR_DEPTH_666; + break; + case 1: // 8 bits + requestColorDepth = COLOR_DEPTH_888; + break; + case 2: // 10 bits + requestColorDepth = COLOR_DEPTH_101010; + break; + case 3: // 12 bits + requestColorDepth = COLOR_DEPTH_121212; + break; + default: + break; + } + + switch (dpcd_test_params.bits.CLR_FORMAT) { + case 0: + requestPixelEncoding = PIXEL_ENCODING_RGB; + break; + case 1: + requestPixelEncoding = PIXEL_ENCODING_YCBCR422; + break; + case 2: + requestPixelEncoding = PIXEL_ENCODING_YCBCR444; + break; + default: + requestPixelEncoding = PIXEL_ENCODING_RGB; + break; + } + + if ((requestColorDepth != COLOR_DEPTH_UNDEFINED + && pipe_ctx->stream->timing.display_color_depth != requestColorDepth) + || (requestPixelEncoding != PIXEL_ENCODING_UNDEFINED + && pipe_ctx->stream->timing.pixel_encoding != requestPixelEncoding)) { + DC_LOG_DEBUG("%s: original bpc %d pix encoding %d, changing to %d %d\n", + __func__, + pipe_ctx->stream->timing.display_color_depth, + pipe_ctx->stream->timing.pixel_encoding, + requestColorDepth, + requestPixelEncoding); + pipe_ctx->stream->timing.display_color_depth = requestColorDepth; + pipe_ctx->stream->timing.pixel_encoding = requestPixelEncoding; + + dc_link_update_dsc_config(pipe_ctx); + + aconnector->timing_changed = true; + /* store current timing */ + if (aconnector->timing_requested) + *aconnector->timing_requested = pipe_ctx->stream->timing; + else + DC_LOG_ERROR("%s: timing storage failed\n", __func__); + + } + + dc_link_dp_set_test_pattern( + (struct dc_link *) link, + test_pattern, + test_pattern_color_space, + NULL, + NULL, + 0); + + return false; +} + +void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz) +{ + // TODO +} + +void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable) +{ + /* TODO: add periodic detection implementation */ +} + +void dm_helpers_dp_mst_update_branch_bandwidth( + struct dc_context *ctx, + struct dc_link *link) +{ + // TODO +} + +static bool dm_is_freesync_pcon_whitelist(const uint32_t branch_dev_id) +{ + bool ret_val = false; + + switch (branch_dev_id) { + case DP_BRANCH_DEVICE_ID_0060AD: + case DP_BRANCH_DEVICE_ID_00E04C: + case DP_BRANCH_DEVICE_ID_90CC24: + ret_val = true; + break; + default: + break; + } + + return ret_val; +} + +enum adaptive_sync_type dm_get_adaptive_sync_support_type(struct dc_link *link) +{ + struct dpcd_caps *dpcd_caps = &link->dpcd_caps; + enum adaptive_sync_type as_type = ADAPTIVE_SYNC_TYPE_NONE; + + switch (dpcd_caps->dongle_type) { + case DISPLAY_DONGLE_DP_HDMI_CONVERTER: + if (dpcd_caps->adaptive_sync_caps.dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT == true && + dpcd_caps->allow_invalid_MSA_timing_param == true && + dm_is_freesync_pcon_whitelist(dpcd_caps->branch_dev_id)) + as_type = FREESYNC_TYPE_PCON_IN_WHITELIST; + break; + default: + break; + } + + return as_type; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c new file mode 100644 index 0000000000..51467f132c --- /dev/null +++ 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.c @@ -0,0 +1,950 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services_types.h"
+#include "dc.h"
+
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+
+/**
+ * DOC: overview
+ *
+ * DM provides another layer of IRQ management on top of what the base driver
+ * already provides. This is something that could be cleaned up, and is a
+ * future TODO item.
+ *
+ * The base driver provides IRQ source registration with DRM, handler
+ * registration into the base driver's IRQ table, and a handler callback,
+ * amdgpu_irq_handler(), which DRM invokes on interrupts. This generic handler
+ * looks up the IRQ table and calls the respective
+ * &amdgpu_irq_src_funcs.process hookups.
+ *
+ * What DM provides on top are two IRQ tables specifically for top-half and
+ * bottom-half IRQ handling, with the bottom-half implementing workqueues:
+ *
+ * - &amdgpu_display_manager.irq_handler_list_high_tab
+ * - &amdgpu_display_manager.irq_handler_list_low_tab
+ *
+ * They override the base driver's IRQ table, and the effect can be seen in the
+ * hooks that DM provides for &amdgpu_irq_src_funcs.process. They are all set
+ * to the DM generic handler amdgpu_dm_irq_handler(), which looks up DM's IRQ
+ * tables. However, in order for the base driver to recognize this hook, DM
+ * still needs to register the IRQ with the base driver. See
+ * dce110_register_irq_handlers() and dcn10_register_irq_handlers().
+ *
+ * To expose DC's hardware interrupt toggle to the base driver, DM implements
+ * &amdgpu_irq_src_funcs.set hooks. The base driver calls them through
+ * amdgpu_irq_update() to enable or disable the interrupt.
+ */
+
+/******************************************************************************
+ * Private declarations.
+ *****************************************************************************/
+
+/**
+ * struct amdgpu_dm_irq_handler_data - Data for DM interrupt handlers.
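+ *
+ * One instance is allocated per registered handler and linked into the
+ * per-source handler tables; it is freed again when the handler is
+ * unregistered or on amdgpu_dm_irq_fini().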
+ * + * @list: Linked list entry referencing the next/previous handler + * @handler: Handler function + * @handler_arg: Argument passed to the handler when triggered + * @dm: DM which this handler belongs to + * @irq_source: DC interrupt source that this handler is registered for + * @work: work struct + */ +struct amdgpu_dm_irq_handler_data { + struct list_head list; + interrupt_handler handler; + void *handler_arg; + + struct amdgpu_display_manager *dm; + /* DAL irq source which registered for this interrupt. */ + enum dc_irq_source irq_source; + struct work_struct work; +}; + +#define DM_IRQ_TABLE_LOCK(adev, flags) \ + spin_lock_irqsave(&adev->dm.irq_handler_list_table_lock, flags) + +#define DM_IRQ_TABLE_UNLOCK(adev, flags) \ + spin_unlock_irqrestore(&adev->dm.irq_handler_list_table_lock, flags) + +/****************************************************************************** + * Private functions. + *****************************************************************************/ + +static void init_handler_common_data(struct amdgpu_dm_irq_handler_data *hcd, + void (*ih)(void *), + void *args, + struct amdgpu_display_manager *dm) +{ + hcd->handler = ih; + hcd->handler_arg = args; + hcd->dm = dm; +} + +/** + * dm_irq_work_func() - Handle an IRQ outside of the interrupt handler proper. + * + * @work: work struct + */ +static void dm_irq_work_func(struct work_struct *work) +{ + struct amdgpu_dm_irq_handler_data *handler_data = + container_of(work, struct amdgpu_dm_irq_handler_data, work); + + handler_data->handler(handler_data->handler_arg); + + /* Call a DAL subcomponent which registered for interrupt notification + * at INTERRUPT_LOW_IRQ_CONTEXT. + * (The most common use is HPD interrupt) + */ +} + +/* + * Remove a handler and return a pointer to handler list from which the + * handler was removed. + */ +static struct list_head *remove_irq_handler(struct amdgpu_device *adev, + void *ih, + const struct dc_interrupt_params *int_params) +{ + struct list_head *hnd_list; + struct list_head *entry, *tmp; + struct amdgpu_dm_irq_handler_data *handler; + unsigned long irq_table_flags; + bool handler_removed = false; + enum dc_irq_source irq_source; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + irq_source = int_params->irq_source; + + switch (int_params->int_context) { + case INTERRUPT_HIGH_IRQ_CONTEXT: + hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source]; + break; + case INTERRUPT_LOW_IRQ_CONTEXT: + default: + hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source]; + break; + } + + list_for_each_safe(entry, tmp, hnd_list) { + + handler = list_entry(entry, struct amdgpu_dm_irq_handler_data, + list); + + if (handler == NULL) + continue; + + if (ih == handler->handler) { + /* Found our handler. Remove it from the list. */ + list_del(&handler->list); + handler_removed = true; + break; + } + } + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + if (handler_removed == false) { + /* Not necessarily an error - caller may not + * know the context. + */ + return NULL; + } + + kfree(handler); + + DRM_DEBUG_KMS( + "DM_IRQ: removed irq handler: %p for: dal_src=%d, irq context=%d\n", + ih, int_params->irq_source, int_params->int_context); + + return hnd_list; +} + +/** + * unregister_all_irq_handlers() - Cleans up handlers from the DM IRQ table + * @adev: The base driver device containing the DM device + * + * Go through low and high context IRQ tables and deallocate handlers. 
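+ *
+ * Called from amdgpu_dm_irq_fini() after all pending low-context work items
+ * have been flushed, so no handler work can still be scheduled concurrently.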
+ */
+static void unregister_all_irq_handlers(struct amdgpu_device *adev)
+{
+	struct list_head *hnd_list_low;
+	struct list_head *hnd_list_high;
+	struct list_head *entry, *tmp;
+	struct amdgpu_dm_irq_handler_data *handler;
+	unsigned long irq_table_flags;
+	int i;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	for (i = 0; i < DAL_IRQ_SOURCES_NUMBER; i++) {
+		hnd_list_low = &adev->dm.irq_handler_list_low_tab[i];
+		hnd_list_high = &adev->dm.irq_handler_list_high_tab[i];
+
+		list_for_each_safe(entry, tmp, hnd_list_low) {
+
+			handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
+					     list);
+
+			if (handler == NULL || handler->handler == NULL)
+				continue;
+
+			list_del(&handler->list);
+			kfree(handler);
+		}
+
+		list_for_each_safe(entry, tmp, hnd_list_high) {
+
+			handler = list_entry(entry, struct amdgpu_dm_irq_handler_data,
+					     list);
+
+			if (handler == NULL || handler->handler == NULL)
+				continue;
+
+			list_del(&handler->list);
+			kfree(handler);
+		}
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+}
+
+static bool
+validate_irq_registration_params(struct dc_interrupt_params *int_params,
+				 void (*ih)(void *))
+{
+	if (NULL == int_params || NULL == ih) {
+		DRM_ERROR("DM_IRQ: invalid input!\n");
+		return false;
+	}
+
+	if (int_params->int_context >= INTERRUPT_CONTEXT_NUMBER) {
+		DRM_ERROR("DM_IRQ: invalid context: %d!\n",
+			  int_params->int_context);
+		return false;
+	}
+
+	if (!DAL_VALID_IRQ_SRC_NUM(int_params->irq_source)) {
+		DRM_ERROR("DM_IRQ: invalid irq_source: %d!\n",
+			  int_params->irq_source);
+		return false;
+	}
+
+	return true;
+}
+
+static bool validate_irq_unregistration_params(enum dc_irq_source irq_source,
+					       irq_handler_idx handler_idx)
+{
+	if (handler_idx == DAL_INVALID_IRQ_HANDLER_IDX) {
+		DRM_ERROR("DM_IRQ: invalid handler_idx==NULL!\n");
+		return false;
+	}
+
+	if (!DAL_VALID_IRQ_SRC_NUM(irq_source)) {
+		DRM_ERROR("DM_IRQ: invalid irq_source:%d!\n", irq_source);
+		return false;
+	}
+
+	return true;
+}
+
+/******************************************************************************
+ * Public functions.
+ *
+ * Note: caller is responsible for input validation.
+ *****************************************************************************/
+
+/**
+ * amdgpu_dm_irq_register_interrupt() - Register a handler within DM.
+ * @adev: The base driver device containing the DM device.
+ * @int_params: Interrupt parameters containing the source, and handler context
+ * @ih: Function pointer to the interrupt handler to register
+ * @handler_args: Arguments passed to the handler when the interrupt occurs
+ *
+ * Register an interrupt handler for the given IRQ source, under the given
+ * context. The context can either be high or low. High context handlers are
+ * executed directly within ISR context, while low context handlers are
+ * executed within a workqueue, thereby allowing operations that sleep.
+ *
+ * Registered handlers are called in FIFO order, i.e. the handler that was
+ * registered first is called first.
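+ *
+ * A minimal registration sketch (the handler and variable names here are
+ * illustrative only, not part of this file):
+ *
+ *     void *handler_ref;
+ *     struct dc_interrupt_params int_params = {0};
+ *
+ *     int_params.int_context = INTERRUPT_LOW_IRQ_CONTEXT;
+ *     int_params.irq_source = DC_IRQ_SOURCE_HPD1;
+ *     handler_ref = amdgpu_dm_irq_register_interrupt(adev, &int_params,
+ *                                                     handle_hpd_irq,
+ *                                                     (void *)aconnector);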
+ * + * Return: Handler data &struct amdgpu_dm_irq_handler_data containing the IRQ + * source, handler function, and args + */ +void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev, + struct dc_interrupt_params *int_params, + void (*ih)(void *), + void *handler_args) +{ + struct list_head *hnd_list; + struct amdgpu_dm_irq_handler_data *handler_data; + unsigned long irq_table_flags; + enum dc_irq_source irq_source; + + if (false == validate_irq_registration_params(int_params, ih)) + return DAL_INVALID_IRQ_HANDLER_IDX; + + handler_data = kzalloc(sizeof(*handler_data), GFP_KERNEL); + if (!handler_data) { + DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n"); + return DAL_INVALID_IRQ_HANDLER_IDX; + } + + init_handler_common_data(handler_data, ih, handler_args, &adev->dm); + + irq_source = int_params->irq_source; + + handler_data->irq_source = irq_source; + + /* Lock the list, add the handler. */ + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + switch (int_params->int_context) { + case INTERRUPT_HIGH_IRQ_CONTEXT: + hnd_list = &adev->dm.irq_handler_list_high_tab[irq_source]; + break; + case INTERRUPT_LOW_IRQ_CONTEXT: + default: + hnd_list = &adev->dm.irq_handler_list_low_tab[irq_source]; + INIT_WORK(&handler_data->work, dm_irq_work_func); + break; + } + + list_add_tail(&handler_data->list, hnd_list); + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + /* This pointer will be stored by code which requested interrupt + * registration. + * The same pointer will be needed in order to unregister the + * interrupt. + */ + + DRM_DEBUG_KMS( + "DM_IRQ: added irq handler: %p for: dal_src=%d, irq context=%d\n", + handler_data, + irq_source, + int_params->int_context); + + return handler_data; +} + +/** + * amdgpu_dm_irq_unregister_interrupt() - Remove a handler from the DM IRQ table + * @adev: The base driver device containing the DM device + * @irq_source: IRQ source to remove the given handler from + * @ih: Function pointer to the interrupt handler to unregister + * + * Go through both low and high context IRQ tables, and find the given handler + * for the given irq source. If found, remove it. Otherwise, do nothing. + */ +void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, + enum dc_irq_source irq_source, + void *ih) +{ + struct list_head *handler_list; + struct dc_interrupt_params int_params; + int i; + + if (false == validate_irq_unregistration_params(irq_source, ih)) + return; + + memset(&int_params, 0, sizeof(int_params)); + + int_params.irq_source = irq_source; + + for (i = 0; i < INTERRUPT_CONTEXT_NUMBER; i++) { + + int_params.int_context = i; + + handler_list = remove_irq_handler(adev, ih, &int_params); + + if (handler_list != NULL) + break; + } + + if (handler_list == NULL) { + /* If we got here, it means we searched all irq contexts + * for this irq source, but the handler was not found. + */ + DRM_ERROR( + "DM_IRQ: failed to find irq handler:%p for irq_source:%d!\n", + ih, irq_source); + } +} + +/** + * amdgpu_dm_irq_init() - Initialize DM IRQ management + * @adev: The base driver device containing the DM device + * + * Initialize DM's high and low context IRQ tables. + * + * The N by M table contains N IRQ sources, with M + * &struct amdgpu_dm_irq_handler_data hooked together in a linked list. The + * list_heads are initialized here. When an interrupt n is triggered, all m + * handlers are called in sequence, FIFO according to registration order. 
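+ *
+ * Conceptually, for each IRQ source n:
+ *
+ *   irq_handler_list_high_tab[n] --> hd_1 --> hd_2 --> ... --> hd_m
+ *   irq_handler_list_low_tab[n]  --> hd_1 --> hd_2 --> ... --> hd_m
+ *
+ * where each hd_i is a &struct amdgpu_dm_irq_handler_data.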
+ * + * The low context table requires special steps to initialize, since handlers + * will be deferred to a workqueue. See &struct irq_list_head. + */ +int amdgpu_dm_irq_init(struct amdgpu_device *adev) +{ + int src; + struct list_head *lh; + + DRM_DEBUG_KMS("DM_IRQ\n"); + + spin_lock_init(&adev->dm.irq_handler_list_table_lock); + + for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { + /* low context handler list init */ + lh = &adev->dm.irq_handler_list_low_tab[src]; + INIT_LIST_HEAD(lh); + /* high context handler init */ + INIT_LIST_HEAD(&adev->dm.irq_handler_list_high_tab[src]); + } + + return 0; +} + +/** + * amdgpu_dm_irq_fini() - Tear down DM IRQ management + * @adev: The base driver device containing the DM device + * + * Flush all work within the low context IRQ table. + */ +void amdgpu_dm_irq_fini(struct amdgpu_device *adev) +{ + int src; + struct list_head *lh; + struct list_head *entry, *tmp; + struct amdgpu_dm_irq_handler_data *handler; + unsigned long irq_table_flags; + + DRM_DEBUG_KMS("DM_IRQ: releasing resources.\n"); + for (src = 0; src < DAL_IRQ_SOURCES_NUMBER; src++) { + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + /* The handler was removed from the table, + * it means it is safe to flush all the 'work' + * (because no code can schedule a new one). + */ + lh = &adev->dm.irq_handler_list_low_tab[src]; + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); + + if (!list_empty(lh)) { + list_for_each_safe(entry, tmp, lh) { + handler = list_entry( + entry, + struct amdgpu_dm_irq_handler_data, + list); + flush_work(&handler->work); + } + } + } + /* Deallocate handlers from the table. */ + unregister_all_irq_handlers(adev); +} + +int amdgpu_dm_irq_suspend(struct amdgpu_device *adev) +{ + int src; + struct list_head *hnd_list_h; + struct list_head *hnd_list_l; + unsigned long irq_table_flags; + struct list_head *entry, *tmp; + struct amdgpu_dm_irq_handler_data *handler; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + DRM_DEBUG_KMS("DM_IRQ: suspend\n"); + + /** + * Disable HW interrupt for HPD and HPDRX only since FLIP and VBLANK + * will be disabled from manage_dm_interrupts on disable CRTC. 
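+ *
+ * The loop below spans [DC_IRQ_SOURCE_HPD1, DC_IRQ_SOURCE_HPD6RX], which
+ * covers both the regular HPD and the HPD RX (short pulse) sources.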
+ */
+	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
+		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
+		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+			dc_interrupt_set(adev->dm.dc, src, false);
+
+		DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+		if (!list_empty(hnd_list_l)) {
+			list_for_each_safe(entry, tmp, hnd_list_l) {
+				handler = list_entry(
+					entry,
+					struct amdgpu_dm_irq_handler_data,
+					list);
+				flush_work(&handler->work);
+			}
+		}
+		DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+	return 0;
+}
+
+int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev)
+{
+	int src;
+	struct list_head *hnd_list_h, *hnd_list_l;
+	unsigned long irq_table_flags;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	DRM_DEBUG_KMS("DM_IRQ: early resume\n");
+
+	/* Re-enable the HPD RX (short pulse) HW interrupts */
+	for (src = DC_IRQ_SOURCE_HPD1RX; src <= DC_IRQ_SOURCE_HPD6RX; src++) {
+		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
+		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+			dc_interrupt_set(adev->dm.dc, src, true);
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+
+	return 0;
+}
+
+int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev)
+{
+	int src;
+	struct list_head *hnd_list_h, *hnd_list_l;
+	unsigned long irq_table_flags;
+
+	DM_IRQ_TABLE_LOCK(adev, irq_table_flags);
+
+	DRM_DEBUG_KMS("DM_IRQ: resume\n");
+
+	/**
+	 * Re-enable HW interrupts for HPD only, since FLIP and VBLANK
+	 * will be enabled from manage_dm_interrupts on enable CRTC.
+	 */
+	for (src = DC_IRQ_SOURCE_HPD1; src <= DC_IRQ_SOURCE_HPD6; src++) {
+		hnd_list_l = &adev->dm.irq_handler_list_low_tab[src];
+		hnd_list_h = &adev->dm.irq_handler_list_high_tab[src];
+		if (!list_empty(hnd_list_l) || !list_empty(hnd_list_h))
+			dc_interrupt_set(adev->dm.dc, src, true);
+	}
+
+	DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags);
+	return 0;
+}
+
+/*
+ * amdgpu_dm_irq_schedule_work - schedule all work items registered for the
+ * "irq_source".
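+ *
+ * Walks the low-context handler list and queues the first work item that is
+ * not already pending. If every item is still pending from an earlier
+ * interrupt, a temporary copy of the first handler is allocated (GFP_ATOMIC)
+ * and queued instead, so the event is not dropped.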
+ */ +static void amdgpu_dm_irq_schedule_work(struct amdgpu_device *adev, + enum dc_irq_source irq_source) +{ + struct list_head *handler_list = &adev->dm.irq_handler_list_low_tab[irq_source]; + struct amdgpu_dm_irq_handler_data *handler_data; + bool work_queued = false; + + if (list_empty(handler_list)) + return; + + list_for_each_entry(handler_data, handler_list, list) { + if (queue_work(system_highpri_wq, &handler_data->work)) { + work_queued = true; + break; + } + } + + if (!work_queued) { + struct amdgpu_dm_irq_handler_data *handler_data_add; + /*get the amdgpu_dm_irq_handler_data of first item pointed by handler_list*/ + handler_data = container_of(handler_list->next, struct amdgpu_dm_irq_handler_data, list); + + /*allocate a new amdgpu_dm_irq_handler_data*/ + handler_data_add = kzalloc(sizeof(*handler_data), GFP_ATOMIC); + if (!handler_data_add) { + DRM_ERROR("DM_IRQ: failed to allocate irq handler!\n"); + return; + } + + /*copy new amdgpu_dm_irq_handler_data members from handler_data*/ + handler_data_add->handler = handler_data->handler; + handler_data_add->handler_arg = handler_data->handler_arg; + handler_data_add->dm = handler_data->dm; + handler_data_add->irq_source = irq_source; + + list_add_tail(&handler_data_add->list, handler_list); + + INIT_WORK(&handler_data_add->work, dm_irq_work_func); + + if (queue_work(system_highpri_wq, &handler_data_add->work)) + DRM_DEBUG("Queued work for handling interrupt from " + "display for IRQ source %d\n", + irq_source); + else + DRM_ERROR("Failed to queue work for handling interrupt " + "from display for IRQ source %d\n", + irq_source); + } +} + +/* + * amdgpu_dm_irq_immediate_work + * Callback high irq work immediately, don't send to work queue + */ +static void amdgpu_dm_irq_immediate_work(struct amdgpu_device *adev, + enum dc_irq_source irq_source) +{ + struct amdgpu_dm_irq_handler_data *handler_data; + unsigned long irq_table_flags; + + DM_IRQ_TABLE_LOCK(adev, irq_table_flags); + + list_for_each_entry(handler_data, + &adev->dm.irq_handler_list_high_tab[irq_source], + list) { + /* Call a subcomponent which registered for immediate + * interrupt notification + */ + handler_data->handler(handler_data->handler_arg); + } + + DM_IRQ_TABLE_UNLOCK(adev, irq_table_flags); +} + +/** + * amdgpu_dm_irq_handler - Generic DM IRQ handler + * @adev: amdgpu base driver device containing the DM device + * @source: Unused + * @entry: Data about the triggered interrupt + * + * Calls all registered high irq work immediately, and schedules work for low + * irq. The DM IRQ table is used to find the corresponding handlers. 
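+ *
+ * The dispatch path is roughly:
+ *
+ *   amdgpu_irq_handler()                    base driver entry point
+ *     amdgpu_dm_irq_handler()               this function
+ *       dc_interrupt_to_irq_source()        map src_id/src_data to dc source
+ *       dc_interrupt_ack()
+ *       amdgpu_dm_irq_immediate_work()      high context, in ISR
+ *       amdgpu_dm_irq_schedule_work()       low context, via workqueue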
+ */ +static int amdgpu_dm_irq_handler(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + + enum dc_irq_source src = + dc_interrupt_to_irq_source( + adev->dm.dc, + entry->src_id, + entry->src_data[0]); + + dc_interrupt_ack(adev->dm.dc, src); + + /* Call high irq work immediately */ + amdgpu_dm_irq_immediate_work(adev, src); + /*Schedule low_irq work */ + amdgpu_dm_irq_schedule_work(adev, src); + + return 0; +} + +static enum dc_irq_source amdgpu_dm_hpd_to_dal_irq_source(unsigned int type) +{ + switch (type) { + case AMDGPU_HPD_1: + return DC_IRQ_SOURCE_HPD1; + case AMDGPU_HPD_2: + return DC_IRQ_SOURCE_HPD2; + case AMDGPU_HPD_3: + return DC_IRQ_SOURCE_HPD3; + case AMDGPU_HPD_4: + return DC_IRQ_SOURCE_HPD4; + case AMDGPU_HPD_5: + return DC_IRQ_SOURCE_HPD5; + case AMDGPU_HPD_6: + return DC_IRQ_SOURCE_HPD6; + default: + return DC_IRQ_SOURCE_INVALID; + } +} + +static int amdgpu_dm_set_hpd_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + enum dc_irq_source src = amdgpu_dm_hpd_to_dal_irq_source(type); + bool st = (state == AMDGPU_IRQ_STATE_ENABLE); + + dc_interrupt_set(adev->dm.dc, src, st); + return 0; +} + +static inline int dm_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int crtc_id, + enum amdgpu_interrupt_state state, + const enum irq_type dal_irq_type, + const char *func) +{ + bool st; + enum dc_irq_source irq_source; + + struct amdgpu_crtc *acrtc = adev->mode_info.crtcs[crtc_id]; + + if (!acrtc) { + DRM_ERROR( + "%s: crtc is NULL at id :%d\n", + func, + crtc_id); + return 0; + } + + if (acrtc->otg_inst == -1) + return 0; + + irq_source = dal_irq_type + acrtc->otg_inst; + + st = (state == AMDGPU_IRQ_STATE_ENABLE); + + dc_interrupt_set(adev->dm.dc, irq_source, st); + return 0; +} + +static int amdgpu_dm_set_pflip_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int crtc_id, + enum amdgpu_interrupt_state state) +{ + return dm_irq_state( + adev, + source, + crtc_id, + state, + IRQ_TYPE_PFLIP, + __func__); +} + +static int amdgpu_dm_set_crtc_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int crtc_id, + enum amdgpu_interrupt_state state) +{ + return dm_irq_state( + adev, + source, + crtc_id, + state, + IRQ_TYPE_VBLANK, + __func__); +} + +static int amdgpu_dm_set_vline0_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int crtc_id, + enum amdgpu_interrupt_state state) +{ + return dm_irq_state( + adev, + source, + crtc_id, + state, + IRQ_TYPE_VLINE0, + __func__); +} + +static int amdgpu_dm_set_dmub_outbox_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int crtc_id, + enum amdgpu_interrupt_state state) +{ + enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX; + bool st = (state == AMDGPU_IRQ_STATE_ENABLE); + + dc_interrupt_set(adev->dm.dc, irq_source, st); + return 0; +} + +static int amdgpu_dm_set_vupdate_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int crtc_id, + enum amdgpu_interrupt_state state) +{ + return dm_irq_state( + adev, + source, + crtc_id, + state, + IRQ_TYPE_VUPDATE, + __func__); +} + +static int amdgpu_dm_set_dmub_trace_irq_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + enum dc_irq_source irq_source = DC_IRQ_SOURCE_DMCUB_OUTBOX0; + bool st = (state == 
AMDGPU_IRQ_STATE_ENABLE); + + dc_interrupt_set(adev->dm.dc, irq_source, st); + return 0; +} + +static const struct amdgpu_irq_src_funcs dm_crtc_irq_funcs = { + .set = amdgpu_dm_set_crtc_irq_state, + .process = amdgpu_dm_irq_handler, +}; + +static const struct amdgpu_irq_src_funcs dm_vline0_irq_funcs = { + .set = amdgpu_dm_set_vline0_irq_state, + .process = amdgpu_dm_irq_handler, +}; + +static const struct amdgpu_irq_src_funcs dm_dmub_outbox_irq_funcs = { + .set = amdgpu_dm_set_dmub_outbox_irq_state, + .process = amdgpu_dm_irq_handler, +}; + +static const struct amdgpu_irq_src_funcs dm_vupdate_irq_funcs = { + .set = amdgpu_dm_set_vupdate_irq_state, + .process = amdgpu_dm_irq_handler, +}; + +static const struct amdgpu_irq_src_funcs dm_dmub_trace_irq_funcs = { + .set = amdgpu_dm_set_dmub_trace_irq_state, + .process = amdgpu_dm_irq_handler, +}; + +static const struct amdgpu_irq_src_funcs dm_pageflip_irq_funcs = { + .set = amdgpu_dm_set_pflip_irq_state, + .process = amdgpu_dm_irq_handler, +}; + +static const struct amdgpu_irq_src_funcs dm_hpd_irq_funcs = { + .set = amdgpu_dm_set_hpd_irq_state, + .process = amdgpu_dm_irq_handler, +}; + +void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->crtc_irq.num_types = adev->mode_info.num_crtc; + adev->crtc_irq.funcs = &dm_crtc_irq_funcs; + + adev->vline0_irq.num_types = adev->mode_info.num_crtc; + adev->vline0_irq.funcs = &dm_vline0_irq_funcs; + + adev->dmub_outbox_irq.num_types = 1; + adev->dmub_outbox_irq.funcs = &dm_dmub_outbox_irq_funcs; + + adev->vupdate_irq.num_types = adev->mode_info.num_crtc; + adev->vupdate_irq.funcs = &dm_vupdate_irq_funcs; + + adev->dmub_trace_irq.num_types = 1; + adev->dmub_trace_irq.funcs = &dm_dmub_trace_irq_funcs; + + adev->pageflip_irq.num_types = adev->mode_info.num_crtc; + adev->pageflip_irq.funcs = &dm_pageflip_irq_funcs; + + adev->hpd_irq.num_types = adev->mode_info.num_hpd; + adev->hpd_irq.funcs = &dm_hpd_irq_funcs; +} +void amdgpu_dm_outbox_init(struct amdgpu_device *adev) +{ + dc_interrupt_set(adev->dm.dc, + DC_IRQ_SOURCE_DMCUB_OUTBOX, + true); +} + +/** + * amdgpu_dm_hpd_init - hpd setup callback. + * + * @adev: amdgpu_device pointer + * + * Setup the hpd pins used by the card (evergreen+). + * Enable the pin, set the polarity, and enable the hpd interrupts. + */ +void amdgpu_dm_hpd_init(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev_to_drm(adev); + struct drm_connector *connector; + struct drm_connector_list_iter iter; + + drm_connector_list_iter_begin(dev, &iter); + drm_for_each_connector_iter(connector, &iter) { + struct amdgpu_dm_connector *amdgpu_dm_connector = + to_amdgpu_dm_connector(connector); + + const struct dc_link *dc_link = amdgpu_dm_connector->dc_link; + + if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) { + dc_interrupt_set(adev->dm.dc, + dc_link->irq_source_hpd, + true); + } + + if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) { + dc_interrupt_set(adev->dm.dc, + dc_link->irq_source_hpd_rx, + true); + } + } + drm_connector_list_iter_end(&iter); +} + +/** + * amdgpu_dm_hpd_fini - hpd tear down callback. + * + * @adev: amdgpu_device pointer + * + * Tear down the hpd pins used by the card (evergreen+). + * Disable the hpd interrupts. 
+ */
+void amdgpu_dm_hpd_fini(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev_to_drm(adev);
+	struct drm_connector *connector;
+	struct drm_connector_list_iter iter;
+
+	drm_connector_list_iter_begin(dev, &iter);
+	drm_for_each_connector_iter(connector, &iter) {
+		struct amdgpu_dm_connector *amdgpu_dm_connector =
+			to_amdgpu_dm_connector(connector);
+		const struct dc_link *dc_link = amdgpu_dm_connector->dc_link;
+
+		if (dc_link->irq_source_hpd != DC_IRQ_SOURCE_INVALID) {
+			dc_interrupt_set(adev->dm.dc,
+					 dc_link->irq_source_hpd,
+					 false);
+		}
+
+		if (dc_link->irq_source_hpd_rx != DC_IRQ_SOURCE_INVALID) {
+			dc_interrupt_set(adev->dm.dc,
+					 dc_link->irq_source_hpd_rx,
+					 false);
+		}
+	}
+	drm_connector_list_iter_end(&iter);
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
new file mode 100644
index 0000000000..2349238a62
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq.h
@@ -0,0 +1,103 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __AMDGPU_DM_IRQ_H__
+#define __AMDGPU_DM_IRQ_H__
+
+#include "irq_types.h" /* DAL irq definitions */
+
+/*
+ * Display Manager IRQ-related interfaces (for use by DAL).
+ */
+
+/**
+ * amdgpu_dm_irq_init - Initialize internal structures of 'amdgpu_dm_irq'.
+ *
+ * This function should be called exactly once - during DM initialization.
+ *
+ * Returns:
+ *	0 - success
+ *	non-zero - error
+ */
+int amdgpu_dm_irq_init(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_fini - deallocate internal structures of 'amdgpu_dm_irq'.
+ *
+ * This function should be called exactly once - during DM destruction.
+ *
+ */
+void amdgpu_dm_irq_fini(struct amdgpu_device *adev);
+
+/**
+ * amdgpu_dm_irq_register_interrupt - register irq handler for Display block.
+ *
+ * @adev: AMD DRM device
+ * @int_params: parameters for the irq
+ * @ih: pointer to the irq handler function
+ * @handler_args: arguments which will be passed to ih
+ *
+ * Returns:
+ *	IRQ Handler Index on success.
+ *	NULL on failure.
+ *
+ * Cannot be called from an interrupt handler.
+ */
+void *amdgpu_dm_irq_register_interrupt(struct amdgpu_device *adev,
+				       struct dc_interrupt_params *int_params,
+				       void (*ih)(void *),
+				       void *handler_args);
+
+/**
+ * amdgpu_dm_irq_unregister_interrupt - unregister handler which was registered
+ *	by amdgpu_dm_irq_register_interrupt().
+ *
+ * @adev: AMD DRM device.
+ * @ih_index: irq handler index which was returned by + * amdgpu_dm_irq_register_interrupt + */ +void amdgpu_dm_irq_unregister_interrupt(struct amdgpu_device *adev, + enum dc_irq_source irq_source, + void *ih_index); + +void amdgpu_dm_set_irq_funcs(struct amdgpu_device *adev); + +void amdgpu_dm_outbox_init(struct amdgpu_device *adev); +void amdgpu_dm_hpd_init(struct amdgpu_device *adev); +void amdgpu_dm_hpd_fini(struct amdgpu_device *adev); + +/** + * amdgpu_dm_irq_suspend - disable ASIC interrupt during suspend. + * + */ +int amdgpu_dm_irq_suspend(struct amdgpu_device *adev); + +/** + * amdgpu_dm_irq_resume_early - enable HPDRX ASIC interrupts during resume. + * amdgpu_dm_irq_resume - enable ASIC interrupt during resume. + * + */ +int amdgpu_dm_irq_resume_early(struct amdgpu_device *adev); +int amdgpu_dm_irq_resume_late(struct amdgpu_device *adev); + +#endif /* __AMDGPU_DM_IRQ_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h new file mode 100644 index 0000000000..5c9303241a --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_irq_params.h @@ -0,0 +1,47 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_IRQ_PARAMS_H__ +#define __AMDGPU_DM_IRQ_PARAMS_H__ + +#include "amdgpu_dm_crc.h" + +struct dm_irq_params { + u32 last_flip_vblank; + struct mod_vrr_params vrr_params; + struct dc_stream_state *stream; + int active_planes; + bool allow_psr_entry; + struct mod_freesync_config freesync_config; + +#ifdef CONFIG_DEBUG_FS + enum amdgpu_dm_pipe_crc_source crc_src; +#ifdef CONFIG_DRM_AMD_SECURE_DISPLAY + struct crc_window_param window_param; +#endif +#endif +}; + +#endif /* __AMDGPU_DM_IRQ_PARAMS_H__ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c new file mode 100644 index 0000000000..10dd4cd6f5 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -0,0 +1,1665 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. 
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <drm/display/drm_dp_helper.h>
+#include <drm/display/drm_dp_mst_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_mst_types.h"
+#include "amdgpu_dm_hdcp.h"
+
+#include "dc.h"
+#include "dm_helpers.h"
+
+#include "ddc_service_types.h"
+#include "dpcd_defs.h"
+
+#include "dmub_cmd.h"
+#if defined(CONFIG_DEBUG_FS)
+#include "amdgpu_dm_debugfs.h"
+#endif
+
+#include "dc/dcn20/dcn20_resource.h"
+
+#define PEAK_FACTOR_X1000 1006
+
+static ssize_t dm_dp_aux_transfer(struct drm_dp_aux *aux,
+				  struct drm_dp_aux_msg *msg)
+{
+	ssize_t result = 0;
+	struct aux_payload payload;
+	enum aux_return_code_type operation_result;
+	struct amdgpu_device *adev;
+	struct ddc_service *ddc;
+
+	if (WARN_ON(msg->size > 16))
+		return -E2BIG;
+
+	payload.address = msg->address;
+	payload.data = msg->buffer;
+	payload.length = msg->size;
+	payload.reply = &msg->reply;
+	payload.i2c_over_aux = (msg->request & DP_AUX_NATIVE_WRITE) == 0;
+	payload.write = (msg->request & DP_AUX_I2C_READ) == 0;
+	payload.mot = (msg->request & DP_AUX_I2C_MOT) != 0;
+	payload.write_status_update =
+		(msg->request & DP_AUX_I2C_WRITE_STATUS_UPDATE) != 0;
+	payload.defer_delay = 0;
+
+	result = dc_link_aux_transfer_raw(TO_DM_AUX(aux)->ddc_service, &payload,
+					  &operation_result);
+
+	/*
+	 * Workaround for certain Intel platforms where HPD unexpectedly pulls
+	 * low during the 1st sideband message transaction and
+	 * AUX_RET_ERROR_HPD_DISCON is returned. The AUX transaction actually
+	 * succeeds in such a case, so bypass the error.
+	 */
+	ddc = TO_DM_AUX(aux)->ddc_service;
+	adev = ddc->ctx->driver_context;
+	if (adev->dm.aux_hpd_discon_quirk) {
+		if (msg->address == DP_SIDEBAND_MSG_DOWN_REQ_BASE &&
+		    operation_result == AUX_RET_ERROR_HPD_DISCON) {
+			result = 0;
+			operation_result = AUX_RET_SUCCESS;
+		}
+	}
+
+	if (payload.write && result >= 0)
+		result = msg->size;
+
+	if (result < 0)
+		switch (operation_result) {
+		case AUX_RET_SUCCESS:
+			break;
+		case AUX_RET_ERROR_HPD_DISCON:
+		case AUX_RET_ERROR_UNKNOWN:
+		case AUX_RET_ERROR_INVALID_OPERATION:
+		case AUX_RET_ERROR_PROTOCOL_ERROR:
+			result = -EIO;
+			break;
+		case AUX_RET_ERROR_INVALID_REPLY:
+		case AUX_RET_ERROR_ENGINE_ACQUIRE:
+			result = -EBUSY;
+			break;
+		case AUX_RET_ERROR_TIMEOUT:
+			result = -ETIMEDOUT;
+			break;
+		}
+
+	return result;
+}
+
+static void
+dm_dp_mst_connector_destroy(struct drm_connector *connector)
+{
+	struct amdgpu_dm_connector *aconnector =
+		to_amdgpu_dm_connector(connector);
+
+	if (aconnector->dc_sink) {
+		dc_link_remove_remote_sink(aconnector->dc_link,
+					   aconnector->dc_sink);
+		dc_sink_release(aconnector->dc_sink);
+	}
+
+	kfree(aconnector->edid);
+
+	drm_connector_cleanup(connector);
+	drm_dp_mst_put_port_malloc(aconnector->mst_output_port);
+	kfree(aconnector);
+}
+
+static int
+amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
+{
+	struct amdgpu_dm_connector *amdgpu_dm_connector =
+		to_amdgpu_dm_connector(connector);
+	int r;
+
+	r = drm_dp_mst_connector_late_register(connector,
+					       amdgpu_dm_connector->mst_output_port);
+	if (r < 0)
+		return r;
+
+#if defined(CONFIG_DEBUG_FS)
+	connector_debugfs_init(amdgpu_dm_connector);
+#endif
+
+	return 0;
+}
+
+static void
+amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
+{
+	struct amdgpu_dm_connector *aconnector =
+		to_amdgpu_dm_connector(connector);
+	struct drm_dp_mst_port *port = aconnector->mst_output_port;
+	struct amdgpu_dm_connector *root = aconnector->mst_root;
+	struct dc_link *dc_link = aconnector->dc_link;
+	struct dc_sink *dc_sink = aconnector->dc_sink;
+
+	drm_dp_mst_connector_early_unregister(connector, port);
+
+	/*
+	 * Release the dc_sink for a connector whose attached port is no
+	 * longer in the MST topology.
+	 */
+	drm_modeset_lock(&root->mst_mgr.base.lock, NULL);
+	if (dc_sink) {
+		if (dc_link->sink_count)
+			dc_link_remove_remote_sink(dc_link, dc_sink);
+
+		DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n",
+			   dc_sink, dc_link->sink_count);
+
+		dc_sink_release(dc_sink);
+		aconnector->dc_sink = NULL;
+		aconnector->edid = NULL;
+	}
+
+	aconnector->mst_status = MST_STATUS_DEFAULT;
+	drm_modeset_unlock(&root->mst_mgr.base.lock);
+}
+
+static const struct drm_connector_funcs dm_dp_mst_connector_funcs = {
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.destroy = dm_dp_mst_connector_destroy,
+	.reset = amdgpu_dm_connector_funcs_reset,
+	.atomic_duplicate_state = amdgpu_dm_connector_atomic_duplicate_state,
+	.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+	.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
+	.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
+	.late_register = amdgpu_dm_mst_connector_late_register,
+	.early_unregister = amdgpu_dm_mst_connector_early_unregister,
+};
+
+bool needs_dsc_aux_workaround(struct dc_link *link)
+{
+	if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+	    (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) &&
+	    link->dpcd_caps.sink_count.bits.SINK_COUNT >= 2)
+		return true;
+
+	return false;
+}
+
+static bool is_synaptics_cascaded_panamera(struct dc_link *link, struct drm_dp_mst_port *port)
+{
+	u8 branch_vendor_data[4] = { 0 }; // Vendor data 0x50C ~ 0x50F
+
+	if (drm_dp_dpcd_read(port->mgr->aux, DP_BRANCH_VENDOR_SPECIFIC_START, &branch_vendor_data, 4) == 4) {
+		if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
+		    IS_SYNAPTICS_CASCADED_PANAMERA(link->dpcd_caps.branch_dev_name, branch_vendor_data)) {
+			DRM_INFO("Synaptics Cascaded MST hub\n");
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnector)
+{
+	struct dc_sink *dc_sink = aconnector->dc_sink;
+	struct drm_dp_mst_port *port = aconnector->mst_output_port;
+	u8 dsc_caps[16] = { 0 };
+	u8 dsc_branch_dec_caps_raw[3] = { 0 }; // DSC branch decoder caps 0xA0 ~ 0xA2
+	u8 *dsc_branch_dec_caps = NULL;
+
+	aconnector->dsc_aux = drm_dp_mst_dsc_aux_for_port(port);
+
+	/*
+	 * drm_dp_mst_dsc_aux_for_port() will return NULL for certain configs
+	 * because it only checks the DSC/FEC caps of the port itself and not
+	 * of the dock.
+	 *
+	 * This case returns NULL: a DSC-capable MST dock connected to a
+	 * display that is not FEC/DSC capable.
+	 *
+	 * Workaround: explicitly check for the use case above and use the
+	 * MST dock's AUX as dsc_aux.
+	 */
+	if (!aconnector->dsc_aux && !port->parent->port_parent &&
+	    needs_dsc_aux_workaround(aconnector->dc_link))
+		aconnector->dsc_aux = &aconnector->mst_root->dm_dp_aux.aux;
+
+	/* Synaptics cascaded MST hub case */
+	if (!aconnector->dsc_aux && is_synaptics_cascaded_panamera(aconnector->dc_link, port))
+		aconnector->dsc_aux = port->mgr->aux;
+
+	if (!aconnector->dsc_aux)
+		return false;
+
+	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DSC_SUPPORT, dsc_caps, 16) < 0)
+		return false;
+
+	if (drm_dp_dpcd_read(aconnector->dsc_aux,
+			     DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, dsc_branch_dec_caps_raw, 3) == 3)
+		dsc_branch_dec_caps = dsc_branch_dec_caps_raw;
+
+	if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
+				   dsc_caps, dsc_branch_dec_caps,
+				   &dc_sink->dsc_caps.dsc_dec_caps))
+		return false;
+
+	return true;
+}
+
+static bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector)
+{
+	union dp_downstream_port_present ds_port_present;
+
+	if (!aconnector->dsc_aux)
+		return false;
+
+	if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DOWNSTREAMPORT_PRESENT, &ds_port_present, 1) < 0) {
+		DRM_INFO("Failed to read downstream_port_present 0x05 from DFP of branch device\n");
+		return false;
+	}
+
+	aconnector->mst_downstream_port_present = ds_port_present;
+	DRM_INFO("Downstream port present %d, type %d\n",
+		 ds_port_present.fields.PORT_PRESENT, ds_port_present.fields.PORT_TYPE);
+
+	return true;
+}
+
+static int dm_dp_mst_get_modes(struct drm_connector *connector)
+{
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+	int ret = 0;
+
+	if (!aconnector)
+		return drm_add_edid_modes(connector, NULL);
+
+	if (!aconnector->edid) {
+		struct edid *edid;
+
+		edid = drm_dp_mst_get_edid(connector, &aconnector->mst_root->mst_mgr, aconnector->mst_output_port);
+
+		if (!edid) {
+			amdgpu_dm_set_mst_status(&aconnector->mst_status,
+						 MST_REMOTE_EDID, false);
+
+			drm_connector_update_edid_property(
+				&aconnector->base,
+				NULL);
+
+			DRM_DEBUG_KMS("Can't get EDID of %s. Add default remote sink.", connector->name);
+			if (!aconnector->dc_sink) {
+				struct dc_sink *dc_sink;
+				struct dc_sink_init_data init_params = {
+					.link = aconnector->dc_link,
+					.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+
+				dc_sink = dc_link_add_remote_sink(
+					aconnector->dc_link,
+					NULL,
+					0,
+					&init_params);
+
+				if (!dc_sink) {
+					DRM_ERROR("Unable to add a remote sink\n");
+					return 0;
+				}
+
+				DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n",
+					   dc_sink, aconnector->dc_link->sink_count);
+
+				dc_sink->priv = aconnector;
+				aconnector->dc_sink = dc_sink;
+			}
+
+			return ret;
+		}
+
+		aconnector->edid = edid;
+		amdgpu_dm_set_mst_status(&aconnector->mst_status,
+					 MST_REMOTE_EDID, true);
+	}
+
+	if (aconnector->dc_sink && aconnector->dc_sink->sink_signal == SIGNAL_TYPE_VIRTUAL) {
+		dc_sink_release(aconnector->dc_sink);
+		aconnector->dc_sink = NULL;
+	}
+
+	if (!aconnector->dc_sink) {
+		struct dc_sink *dc_sink;
+		struct dc_sink_init_data init_params = {
+			.link = aconnector->dc_link,
+			.sink_signal = SIGNAL_TYPE_DISPLAY_PORT_MST };
+		dc_sink = dc_link_add_remote_sink(
+			aconnector->dc_link,
+			(uint8_t *)aconnector->edid,
+			(aconnector->edid->extensions + 1) * EDID_LENGTH,
+			&init_params);
+
+		if (!dc_sink) {
+			DRM_ERROR("Unable to add a remote sink\n");
+			return 0;
+		}
+
+		DC_LOG_MST("DM_MST: add remote sink 0x%p, %d remaining\n",
+			   dc_sink, aconnector->dc_link->sink_count);
+
+		dc_sink->priv = aconnector;
+		/* dc_link_add_remote_sink returns a new reference */
+		aconnector->dc_sink = dc_sink;
+
+		/*
+		 * When a display is unplugged from an MST hub, the connector
+		 * will be destroyed within dm_dp_mst_connector_destroy. The
+		 * connector's HDCP properties, like type, undesired, desired,
+		 * enabled, will be lost. So, save the HDCP properties into
+		 * hdcp_work within amdgpu_dm_atomic_commit_tail. If the same
+		 * display is plugged back with the same display index, its
+		 * HDCP properties will be retrieved from hdcp_work within
+		 * dm_dp_mst_get_modes.
+		 */
+		if (aconnector->dc_sink && connector->state) {
+			struct drm_device *dev = connector->dev;
+			struct amdgpu_device *adev = drm_to_adev(dev);
+
+			if (adev->dm.hdcp_workqueue) {
+				struct hdcp_workqueue *hdcp_work = adev->dm.hdcp_workqueue;
+				struct hdcp_workqueue *hdcp_w =
+					&hdcp_work[aconnector->dc_link->link_index];
+
+				connector->state->hdcp_content_type =
+					hdcp_w->hdcp_content_type[connector->index];
+				connector->state->content_protection =
+					hdcp_w->content_protection[connector->index];
+			}
+		}
+
+		if (aconnector->dc_sink) {
+			amdgpu_dm_update_freesync_caps(
+				connector, aconnector->edid);
+
+			if (!validate_dsc_caps_on_connector(aconnector))
+				memset(&aconnector->dc_sink->dsc_caps,
+				       0, sizeof(aconnector->dc_sink->dsc_caps));
+
+			if (!retrieve_downstream_port_device(aconnector))
+				memset(&aconnector->mst_downstream_port_present,
+				       0, sizeof(aconnector->mst_downstream_port_present));
+		}
+	}
+
+	drm_connector_update_edid_property(
+		&aconnector->base, aconnector->edid);
+
+	ret = drm_add_edid_modes(connector, aconnector->edid);
+
+	return ret;
+}
+
+static struct drm_encoder *
+dm_mst_atomic_best_encoder(struct drm_connector *connector,
+			   struct drm_atomic_state *state)
+{
+	struct drm_connector_state *connector_state = drm_atomic_get_new_connector_state(state,
+											 connector);
+	struct drm_device *dev = connector->dev;
+	struct amdgpu_device *adev = drm_to_adev(dev);
+	struct amdgpu_crtc *acrtc = to_amdgpu_crtc(connector_state->crtc);
+
+	return &adev->dm.mst_encoders[acrtc->crtc_id].base;
+}
+
+static int
+dm_dp_mst_detect(struct drm_connector *connector,
+		 struct drm_modeset_acquire_ctx *ctx, bool force)
+{
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+	struct amdgpu_dm_connector *master = aconnector->mst_root;
+	struct drm_dp_mst_port *port = aconnector->mst_output_port;
+	int connection_status;
+
+	if (drm_connector_is_unregistered(connector))
+		return connector_status_disconnected;
+
+	connection_status = drm_dp_mst_detect_port(connector, ctx, &master->mst_mgr,
+						   aconnector->mst_output_port);
+
+	if (port->pdt != DP_PEER_DEVICE_NONE && !port->dpcd_rev) {
+		uint8_t dpcd_rev;
+		int ret;
+
+		ret = drm_dp_dpcd_readb(&port->aux, DP_DP13_DPCD_REV, &dpcd_rev);
+
+		if (ret == 1) {
+			port->dpcd_rev = dpcd_rev;
+
+			/* Could be a DP 1.2 DP Rx case */
+			if (!dpcd_rev) {
+				ret = drm_dp_dpcd_readb(&port->aux, DP_DPCD_REV, &dpcd_rev);
+
+				if (ret == 1)
+					port->dpcd_rev = dpcd_rev;
+			}
+
+			if (!dpcd_rev)
+				DRM_DEBUG_KMS("Can't decide DPCD revision number!");
+		}
+
+		/*
+		 * Could be a legacy sink, a logical port, etc. on DP 1.2.
+		 * These cases will get a NACK when issuing a remote DPCD
+		 * read.
+		 */
+		if (ret != 1)
+			DRM_DEBUG_KMS("Can't access DPCD");
+	} else if (port->pdt == DP_PEER_DEVICE_NONE) {
+		port->dpcd_rev = 0;
+	}
+
+	/*
+	 * Release the dc_sink for a connector whose unplug event was notified
+	 * via a CSN message.
+	 */
+	if (connection_status == connector_status_disconnected && aconnector->dc_sink) {
+		if (aconnector->dc_link->sink_count)
+			dc_link_remove_remote_sink(aconnector->dc_link, aconnector->dc_sink);
+
+		DC_LOG_MST("DM_MST: remove remote sink 0x%p, %d remaining\n",
+			   aconnector->dc_link, aconnector->dc_link->sink_count);
+
+		dc_sink_release(aconnector->dc_sink);
+		aconnector->dc_sink = NULL;
+		aconnector->edid = NULL;
+
+		amdgpu_dm_set_mst_status(&aconnector->mst_status,
+					 MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
+					 false);
+	}
+
+	return connection_status;
+}
+
+static int dm_dp_mst_atomic_check(struct drm_connector *connector,
+				  struct drm_atomic_state *state)
+{
+	struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
+	struct drm_dp_mst_topology_mgr *mst_mgr = &aconnector->mst_root->mst_mgr;
+	struct drm_dp_mst_port *mst_port = aconnector->mst_output_port;
+
+	return drm_dp_atomic_release_time_slots(state, mst_mgr, mst_port);
+}
+
+static const struct drm_connector_helper_funcs dm_dp_mst_connector_helper_funcs = {
+	.get_modes = dm_dp_mst_get_modes,
+	.mode_valid = amdgpu_dm_connector_mode_valid,
+	.atomic_best_encoder = dm_mst_atomic_best_encoder,
+	.detect_ctx = dm_dp_mst_detect,
+	.atomic_check = dm_dp_mst_atomic_check,
+};
+
+static void amdgpu_dm_encoder_destroy(struct drm_encoder *encoder)
+{
+	drm_encoder_cleanup(encoder);
+}
+
+static const struct drm_encoder_funcs amdgpu_dm_encoder_funcs = {
+	.destroy = amdgpu_dm_encoder_destroy,
+};
+
+void
+dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev)
+{
+	struct drm_device *dev = adev_to_drm(adev);
+	int i;
+
+	for (i = 0; i < adev->dm.display_indexes_num; i++) {
+		struct amdgpu_encoder *amdgpu_encoder = &adev->dm.mst_encoders[i];
+		struct drm_encoder *encoder = &amdgpu_encoder->base;
+
+		encoder->possible_crtcs = amdgpu_dm_get_encoder_crtc_mask(adev);
+
+		drm_encoder_init(
+			dev,
+			&amdgpu_encoder->base,
+			&amdgpu_dm_encoder_funcs,
+			DRM_MODE_ENCODER_DPMST,
+			NULL);
+
+		drm_encoder_helper_add(encoder, &amdgpu_dm_encoder_helper_funcs);
+	}
+}
+
+static struct drm_connector *
+dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
+			struct drm_dp_mst_port *port,
+			const char *pathprop)
+{
+	struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+	struct drm_device *dev = master->base.dev;
+	struct amdgpu_device *adev = drm_to_adev(dev);
+	struct amdgpu_dm_connector *aconnector;
+	struct drm_connector *connector;
+	int i;
+
+	aconnector = kzalloc(sizeof(*aconnector), GFP_KERNEL);
+	if (!aconnector)
+		return NULL;
+
+	connector = &aconnector->base;
+	aconnector->mst_output_port = port;
+	aconnector->mst_root = master;
+	amdgpu_dm_set_mst_status(&aconnector->mst_status,
+				 MST_PROBE, true);
+
+	if (drm_connector_init(
+		dev,
+		connector,
+		&dm_dp_mst_connector_funcs,
+		DRM_MODE_CONNECTOR_DisplayPort)) {
+		kfree(aconnector);
+		return NULL;
+	}
+	drm_connector_helper_add(connector, &dm_dp_mst_connector_helper_funcs);
+
+	amdgpu_dm_connector_init_helper(
+		&adev->dm,
+		aconnector,
+		DRM_MODE_CONNECTOR_DisplayPort,
+		master->dc_link,
+		master->connector_id);
+
+	for (i = 0; i < adev->dm.display_indexes_num; i++) {
+		drm_connector_attach_encoder(&aconnector->base,
+					     &adev->dm.mst_encoders[i].base);
+	}
+
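+	/*
+	 * Mirror the SST root connector's optional properties on the MST
+	 * connector so userspace sees the same max bpc and VRR controls.
+	 */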
+	connector->max_bpc_property = master->base.max_bpc_property;
+	if (connector->max_bpc_property)
+		drm_connector_attach_max_bpc_property(connector, 8, 16);
+
+	connector->vrr_capable_property = master->base.vrr_capable_property;
+	if (connector->vrr_capable_property)
+		drm_connector_attach_vrr_capable_property(connector);
+
+	drm_object_attach_property(
+		&connector->base,
+		dev->mode_config.path_property,
+		0);
+	drm_object_attach_property(
+		&connector->base,
+		dev->mode_config.tile_property,
+		0);
+
+	drm_connector_set_path_property(connector, pathprop);
+
+	/*
+	 * Initialize connector state before adding the connector to drm and
+	 * framebuffer lists
+	 */
+	amdgpu_dm_connector_funcs_reset(connector);
+
+	drm_dp_mst_get_port_malloc(port);
+
+	return connector;
+}
+
+void dm_handle_mst_sideband_msg_ready_event(
+	struct drm_dp_mst_topology_mgr *mgr,
+	enum mst_msg_ready_type msg_rdy_type)
+{
+	uint8_t esi[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = { 0 };
+	uint8_t dret;
+	bool new_irq_handled = false;
+	int dpcd_addr;
+	uint8_t dpcd_bytes_to_read;
+	const uint8_t max_process_count = 30;
+	uint8_t process_count = 0;
+	u8 retry;
+	struct amdgpu_dm_connector *aconnector =
+		container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
+
+	const struct dc_link_status *link_status = dc_link_get_status(aconnector->dc_link);
+
+	if (link_status->dpcd_caps->dpcd_rev.raw < 0x12) {
+		dpcd_bytes_to_read = DP_LANE0_1_STATUS - DP_SINK_COUNT;
+		/* DPCD 0x200 - 0x201 for downstream IRQ */
+		dpcd_addr = DP_SINK_COUNT;
+	} else {
+		dpcd_bytes_to_read = DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI;
+		/* DPCD 0x2002 - 0x2005 for downstream IRQ */
+		dpcd_addr = DP_SINK_COUNT_ESI;
+	}
+
+	mutex_lock(&aconnector->handle_mst_msg_ready);
+
+	while (process_count < max_process_count) {
+		u8 ack[DP_PSR_ERROR_STATUS - DP_SINK_COUNT_ESI] = {};
+
+		process_count++;
+
+		dret = drm_dp_dpcd_read(
+			&aconnector->dm_dp_aux.aux,
+			dpcd_addr,
+			esi,
+			dpcd_bytes_to_read);
+
+		if (dret != dpcd_bytes_to_read) {
+			DRM_DEBUG_KMS("DPCD read and acked number is not as expected!");
+			break;
+		}
+
+		DRM_DEBUG_DRIVER("ESI %02x %02x %02x\n", esi[0], esi[1], esi[2]);
+
+		switch (msg_rdy_type) {
+		case DOWN_REP_MSG_RDY_EVENT:
+			/* Only handle the DOWN_REP_MSG_RDY case */
+			esi[1] &= DP_DOWN_REP_MSG_RDY;
+			break;
+		case UP_REQ_MSG_RDY_EVENT:
+			/* Only handle the UP_REQ_MSG_RDY case */
+			esi[1] &= DP_UP_REQ_MSG_RDY;
+			break;
+		default:
+			/* Handle both cases */
+			esi[1] &= (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY);
+			break;
+		}
+
+		if (!esi[1])
+			break;
+
+		/* handle MST irq */
+		if (aconnector->mst_mgr.mst_state)
+			drm_dp_mst_hpd_irq_handle_event(&aconnector->mst_mgr,
+							esi,
+							ack,
+							&new_irq_handled);
+
+		if (new_irq_handled) {
+			/* ACK at DPCD to notify downstream */
+			for (retry = 0; retry < 3; retry++) {
+				ssize_t wret;
+
+				wret = drm_dp_dpcd_writeb(&aconnector->dm_dp_aux.aux,
+							  dpcd_addr + 1,
+							  ack[1]);
+				if (wret == 1)
+					break;
+			}
+
+			if (retry == 3) {
+				DRM_ERROR("Failed to ack MST event.\n");
+				break;
+			}
+
+			drm_dp_mst_hpd_irq_send_new_request(&aconnector->mst_mgr);
+
+			new_irq_handled = false;
+		} else {
+			break;
+		}
+	}
+
+	mutex_unlock(&aconnector->handle_mst_msg_ready);
+
+	if (process_count == max_process_count)
+		DRM_DEBUG_DRIVER("Loop exceeded max iterations\n");
+}
+
+static void dm_handle_mst_down_rep_msg_ready(struct drm_dp_mst_topology_mgr *mgr)
+{
+	dm_handle_mst_sideband_msg_ready_event(mgr, DOWN_REP_MSG_RDY_EVENT);
+}
+
+static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
+	.add_connector = dm_dp_add_mst_connector,
+
.poll_hpd_irq = dm_handle_mst_down_rep_msg_ready, +}; + +void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, + struct amdgpu_dm_connector *aconnector, + int link_index) +{ + struct dc_link_settings max_link_enc_cap = {0}; + + aconnector->dm_dp_aux.aux.name = + kasprintf(GFP_KERNEL, "AMDGPU DM aux hw bus %d", + link_index); + aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer; + aconnector->dm_dp_aux.aux.drm_dev = dm->ddev; + aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc; + + drm_dp_aux_init(&aconnector->dm_dp_aux.aux); + drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux, + &aconnector->base); + + if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_eDP) + return; + + dc_link_dp_get_max_link_enc_cap(aconnector->dc_link, &max_link_enc_cap); + aconnector->mst_mgr.cbs = &dm_mst_cbs; + drm_dp_mst_topology_mgr_init(&aconnector->mst_mgr, adev_to_drm(dm->adev), + &aconnector->dm_dp_aux.aux, 16, 4, aconnector->connector_id); + + drm_connector_attach_dp_subconnector_property(&aconnector->base); +} + +int dm_mst_get_pbn_divider(struct dc_link *link) +{ + if (!link) + return 0; + + return dc_link_bandwidth_kbps(link, + dc_link_get_link_cap(link)) / (8 * 1000 * 54); +} + +struct dsc_mst_fairness_params { + struct dc_crtc_timing *timing; + struct dc_sink *sink; + struct dc_dsc_bw_range bw_range; + bool compression_possible; + struct drm_dp_mst_port *port; + enum dsc_clock_force_state clock_force_enable; + uint32_t num_slices_h; + uint32_t num_slices_v; + uint32_t bpp_overwrite; + struct amdgpu_dm_connector *aconnector; +}; + +static uint16_t get_fec_overhead_multiplier(struct dc_link *dc_link) +{ + u8 link_coding_cap; + uint16_t fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B; + + link_coding_cap = dc_link_dp_mst_decide_link_encoding_format(dc_link); + if (link_coding_cap == DP_128b_132b_ENCODING) + fec_overhead_multiplier_x1000 = PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B; + + return fec_overhead_multiplier_x1000; +} + +static int kbps_to_peak_pbn(int kbps, uint16_t fec_overhead_multiplier_x1000) +{ + u64 peak_kbps = kbps; + + peak_kbps *= 1006; + peak_kbps *= fec_overhead_multiplier_x1000; + peak_kbps = div_u64(peak_kbps, 1000 * 1000); + return (int) DIV64_U64_ROUND_UP(peak_kbps * 64, (54 * 8 * 1000)); +} + +static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *params, + struct dsc_mst_fairness_vars *vars, + int count, + int k) +{ + struct drm_connector *drm_connector; + int i; + struct dc_dsc_config_options dsc_options = {0}; + + for (i = 0; i < count; i++) { + drm_connector = ¶ms[i].aconnector->base; + + dc_dsc_get_default_config_option(params[i].sink->ctx->dc, &dsc_options); + dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16; + + memset(¶ms[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg)); + if (vars[i + k].dsc_enabled && dc_dsc_compute_config( + params[i].sink->ctx->dc->res_pool->dscs[0], + ¶ms[i].sink->dsc_caps.dsc_dec_caps, + &dsc_options, + 0, + params[i].timing, + dc_link_get_highest_encoding_format(params[i].aconnector->dc_link), + ¶ms[i].timing->dsc_cfg)) { + params[i].timing->flags.DSC = 1; + + if (params[i].bpp_overwrite) + params[i].timing->dsc_cfg.bits_per_pixel = params[i].bpp_overwrite; + else + params[i].timing->dsc_cfg.bits_per_pixel = vars[i + k].bpp_x16; + + if (params[i].num_slices_h) + params[i].timing->dsc_cfg.num_slices_h = params[i].num_slices_h; + + if (params[i].num_slices_v) + params[i].timing->dsc_cfg.num_slices_v = 
params[i].num_slices_v; + } else { + params[i].timing->flags.DSC = 0; + } + params[i].timing->dsc_cfg.mst_pbn = vars[i + k].pbn; + } + + for (i = 0; i < count; i++) { + if (params[i].sink) { + if (params[i].sink->sink_signal != SIGNAL_TYPE_VIRTUAL && + params[i].sink->sink_signal != SIGNAL_TYPE_NONE) + DRM_DEBUG_DRIVER("%s i=%d dispname=%s\n", __func__, i, + params[i].sink->edid_caps.display_name); + } + + DRM_DEBUG_DRIVER("dsc=%d bits_per_pixel=%d pbn=%d\n", + params[i].timing->flags.DSC, + params[i].timing->dsc_cfg.bits_per_pixel, + vars[i + k].pbn); + } +} + +static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn) +{ + struct dc_dsc_config dsc_config; + u64 kbps; + + struct drm_connector *drm_connector = ¶m.aconnector->base; + struct dc_dsc_config_options dsc_options = {0}; + + dc_dsc_get_default_config_option(param.sink->ctx->dc, &dsc_options); + dsc_options.max_target_bpp_limit_override_x16 = drm_connector->display_info.max_dsc_bpp * 16; + + kbps = div_u64((u64)pbn * 994 * 8 * 54, 64); + dc_dsc_compute_config( + param.sink->ctx->dc->res_pool->dscs[0], + ¶m.sink->dsc_caps.dsc_dec_caps, + &dsc_options, + (int) kbps, param.timing, + dc_link_get_highest_encoding_format(param.aconnector->dc_link), + &dsc_config); + + return dsc_config.bits_per_pixel; +} + +static int increase_dsc_bpp(struct drm_atomic_state *state, + struct drm_dp_mst_topology_state *mst_state, + struct dc_link *dc_link, + struct dsc_mst_fairness_params *params, + struct dsc_mst_fairness_vars *vars, + int count, + int k) +{ + int i; + bool bpp_increased[MAX_PIPES]; + int initial_slack[MAX_PIPES]; + int min_initial_slack; + int next_index; + int remaining_to_increase = 0; + int link_timeslots_used; + int fair_pbn_alloc; + int ret = 0; + uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); + + for (i = 0; i < count; i++) { + if (vars[i + k].dsc_enabled) { + initial_slack[i] = + kbps_to_peak_pbn(params[i].bw_range.max_kbps, fec_overhead_multiplier_x1000) - vars[i + k].pbn; + bpp_increased[i] = false; + remaining_to_increase += 1; + } else { + initial_slack[i] = 0; + bpp_increased[i] = true; + } + } + + while (remaining_to_increase) { + next_index = -1; + min_initial_slack = -1; + for (i = 0; i < count; i++) { + if (!bpp_increased[i]) { + if (min_initial_slack == -1 || min_initial_slack > initial_slack[i]) { + min_initial_slack = initial_slack[i]; + next_index = i; + } + } + } + + if (next_index == -1) + break; + + link_timeslots_used = 0; + + for (i = 0; i < count; i++) + link_timeslots_used += DIV_ROUND_UP(vars[i + k].pbn, mst_state->pbn_div); + + fair_pbn_alloc = + (63 - link_timeslots_used) / remaining_to_increase * mst_state->pbn_div; + + if (initial_slack[next_index] > fair_pbn_alloc) { + vars[next_index].pbn += fair_pbn_alloc; + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) + return ret; + + ret = drm_dp_mst_atomic_check(state); + if (ret == 0) { + vars[next_index].bpp_x16 = bpp_x16_from_pbn(params[next_index], vars[next_index].pbn); + } else { + vars[next_index].pbn -= fair_pbn_alloc; + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) + return ret; + } + } else { + vars[next_index].pbn += initial_slack[next_index]; + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) + return ret; + + ret = 
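+	/*
+	 * Illustrative numbers for the fair allocation in increase_dsc_bpp():
+	 * an MST link has 63 usable time slots per MTP (one of the 64 is the
+	 * MTP header). If 21 slots are in use and two streams can still be
+	 * upgraded, each is offered (63 - 21) / 2 = 21 slots worth of PBN
+	 * (21 * pbn_div); a stream whose remaining slack is smaller than that
+	 * simply takes its full slack instead.
+	 */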
drm_dp_mst_atomic_check(state); + if (ret == 0) { + vars[next_index].bpp_x16 = params[next_index].bw_range.max_target_bpp_x16; + } else { + vars[next_index].pbn -= initial_slack[next_index]; + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) + return ret; + } + } + + bpp_increased[next_index] = true; + remaining_to_increase--; + } + return 0; +} + +static int try_disable_dsc(struct drm_atomic_state *state, + struct dc_link *dc_link, + struct dsc_mst_fairness_params *params, + struct dsc_mst_fairness_vars *vars, + int count, + int k) +{ + int i; + bool tried[MAX_PIPES]; + int kbps_increase[MAX_PIPES]; + int max_kbps_increase; + int next_index; + int remaining_to_try = 0; + int ret; + uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); + + for (i = 0; i < count; i++) { + if (vars[i + k].dsc_enabled + && vars[i + k].bpp_x16 == params[i].bw_range.max_target_bpp_x16 + && params[i].clock_force_enable == DSC_CLK_FORCE_DEFAULT) { + kbps_increase[i] = params[i].bw_range.stream_kbps - params[i].bw_range.max_kbps; + tried[i] = false; + remaining_to_try += 1; + } else { + kbps_increase[i] = 0; + tried[i] = true; + } + } + + while (remaining_to_try) { + next_index = -1; + max_kbps_increase = -1; + for (i = 0; i < count; i++) { + if (!tried[i]) { + if (max_kbps_increase == -1 || max_kbps_increase < kbps_increase[i]) { + max_kbps_increase = kbps_increase[i]; + next_index = i; + } + } + } + + if (next_index == -1) + break; + + vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) + return ret; + + ret = drm_dp_mst_atomic_check(state); + if (ret == 0) { + vars[next_index].dsc_enabled = false; + vars[next_index].bpp_x16 = 0; + } else { + vars[next_index].pbn = kbps_to_peak_pbn(params[next_index].bw_range.max_kbps, fec_overhead_multiplier_x1000); + ret = drm_dp_atomic_find_time_slots(state, + params[next_index].port->mgr, + params[next_index].port, + vars[next_index].pbn); + if (ret < 0) + return ret; + } + + tried[next_index] = true; + remaining_to_try--; + } + return 0; +} + +static int compute_mst_dsc_configs_for_link(struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dc_link *dc_link, + struct dsc_mst_fairness_vars *vars, + struct drm_dp_mst_topology_mgr *mgr, + int *link_vars_start_index) +{ + struct dc_stream_state *stream; + struct dsc_mst_fairness_params params[MAX_PIPES]; + struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_topology_state *mst_state = drm_atomic_get_mst_topology_state(state, mgr); + int count = 0; + int i, k, ret; + bool debugfs_overwrite = false; + uint16_t fec_overhead_multiplier_x1000 = get_fec_overhead_multiplier(dc_link); + + memset(params, 0, sizeof(params)); + + if (IS_ERR(mst_state)) + return PTR_ERR(mst_state); + + /* Set up params */ + for (i = 0; i < dc_state->stream_count; i++) { + struct dc_dsc_policy dsc_policy = {0}; + + stream = dc_state->streams[i]; + + if (stream->link != dc_link) + continue; + + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + if (!aconnector) + continue; + + if (!aconnector->mst_output_port) + continue; + + stream->timing.flags.DSC = 0; + + params[count].timing = &stream->timing; + params[count].sink = stream->sink; + params[count].aconnector = aconnector; + params[count].port = 
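+	/*
+	 * Note on the bw_range filled in below (restating existing behavior,
+	 * no new logic): dc_dsc_compute_bandwidth_range() reports three
+	 * operating points per stream: min_kbps (deepest DSC compression the
+	 * policy allows), max_kbps (lightest compression) and stream_kbps
+	 * (uncompressed). The fairness code first tries every stream at
+	 * stream_kbps, falls back to min_kbps, then walks bandwidth back up.
+	 */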
aconnector->mst_output_port; + params[count].clock_force_enable = aconnector->dsc_settings.dsc_force_enable; + if (params[count].clock_force_enable == DSC_CLK_FORCE_ENABLE) + debugfs_overwrite = true; + params[count].num_slices_h = aconnector->dsc_settings.dsc_num_slices_h; + params[count].num_slices_v = aconnector->dsc_settings.dsc_num_slices_v; + params[count].bpp_overwrite = aconnector->dsc_settings.dsc_bits_per_pixel; + params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported; + dc_dsc_get_policy_for_timing(params[count].timing, 0, &dsc_policy); + if (!dc_dsc_compute_bandwidth_range( + stream->sink->ctx->dc->res_pool->dscs[0], + stream->sink->ctx->dc->debug.dsc_min_slice_height_override, + dsc_policy.min_target_bpp * 16, + dsc_policy.max_target_bpp * 16, + &stream->sink->dsc_caps.dsc_dec_caps, + &stream->timing, + dc_link_get_highest_encoding_format(dc_link), + ¶ms[count].bw_range)) + params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing, + dc_link_get_highest_encoding_format(dc_link)); + + count++; + } + + if (count == 0) { + ASSERT(0); + return 0; + } + + /* k is start index of vars for current phy link used by mst hub */ + k = *link_vars_start_index; + /* set vars start index for next mst hub phy link */ + *link_vars_start_index += count; + + /* Try no compression */ + for (i = 0; i < count; i++) { + vars[i + k].aconnector = params[i].aconnector; + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + vars[i + k].dsc_enabled = false; + vars[i + k].bpp_x16 = 0; + ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, params[i].port, + vars[i + k].pbn); + if (ret < 0) + return ret; + } + ret = drm_dp_mst_atomic_check(state); + if (ret == 0 && !debugfs_overwrite) { + set_dsc_configs_from_fairness_vars(params, vars, count, k); + return 0; + } else if (ret != -ENOSPC) { + return ret; + } + + /* Try max compression */ + for (i = 0; i < count; i++) { + if (params[i].compression_possible && params[i].clock_force_enable != DSC_CLK_FORCE_DISABLE) { + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.min_kbps, fec_overhead_multiplier_x1000); + vars[i + k].dsc_enabled = true; + vars[i + k].bpp_x16 = params[i].bw_range.min_target_bpp_x16; + ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, + params[i].port, vars[i + k].pbn); + if (ret < 0) + return ret; + } else { + vars[i + k].pbn = kbps_to_peak_pbn(params[i].bw_range.stream_kbps, fec_overhead_multiplier_x1000); + vars[i + k].dsc_enabled = false; + vars[i + k].bpp_x16 = 0; + ret = drm_dp_atomic_find_time_slots(state, params[i].port->mgr, + params[i].port, vars[i + k].pbn); + if (ret < 0) + return ret; + } + } + ret = drm_dp_mst_atomic_check(state); + if (ret != 0) + return ret; + + /* Optimize degree of compression */ + ret = increase_dsc_bpp(state, mst_state, dc_link, params, vars, count, k); + if (ret < 0) + return ret; + + ret = try_disable_dsc(state, dc_link, params, vars, count, k); + if (ret < 0) + return ret; + + set_dsc_configs_from_fairness_vars(params, vars, count, k); + + return 0; +} + +static bool is_dsc_need_re_compute( + struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dc_link *dc_link) +{ + int i, j; + bool is_dsc_need_re_compute = false; + struct amdgpu_dm_connector *stream_on_link[MAX_PIPES]; + int new_stream_on_link_num = 0; + struct amdgpu_dm_connector *aconnector; + struct dc_stream_state *stream; + const struct dc *dc = dc_link->dc; + + /* only check phy used by dsc 
mst branch */ + if (dc_link->type != dc_connection_mst_branch) + return false; + + if (!(dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT || + dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) + return false; + + for (i = 0; i < MAX_PIPES; i++) + stream_on_link[i] = NULL; + + /* check if there is mode change in new request */ + for (i = 0; i < dc_state->stream_count; i++) { + struct drm_crtc_state *new_crtc_state; + struct drm_connector_state *new_conn_state; + + stream = dc_state->streams[i]; + if (!stream) + continue; + + /* check if stream using the same link for mst */ + if (stream->link != dc_link) + continue; + + aconnector = (struct amdgpu_dm_connector *) stream->dm_stream_context; + if (!aconnector) + continue; + + stream_on_link[new_stream_on_link_num] = aconnector; + new_stream_on_link_num++; + + new_conn_state = drm_atomic_get_new_connector_state(state, &aconnector->base); + if (!new_conn_state) + continue; + + if (IS_ERR(new_conn_state)) + continue; + + if (!new_conn_state->crtc) + continue; + + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_conn_state->crtc); + if (!new_crtc_state) + continue; + + if (IS_ERR(new_crtc_state)) + continue; + + if (new_crtc_state->enable && new_crtc_state->active) { + if (new_crtc_state->mode_changed || new_crtc_state->active_changed || + new_crtc_state->connectors_changed) + return true; + } + } + + /* check current_state if there stream on link but it is not in + * new request state + */ + for (i = 0; i < dc->current_state->stream_count; i++) { + stream = dc->current_state->streams[i]; + /* only check stream on the mst hub */ + if (stream->link != dc_link) + continue; + + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + if (!aconnector) + continue; + + for (j = 0; j < new_stream_on_link_num; j++) { + if (stream_on_link[j]) { + if (aconnector == stream_on_link[j]) + break; + } + } + + if (j == new_stream_on_link_num) { + /* not in new state */ + is_dsc_need_re_compute = true; + break; + } + } + + return is_dsc_need_re_compute; +} + +int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars) +{ + int i, j; + struct dc_stream_state *stream; + bool computed_streams[MAX_PIPES]; + struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_topology_mgr *mst_mgr; + struct resource_pool *res_pool; + int link_vars_start_index = 0; + int ret = 0; + + for (i = 0; i < dc_state->stream_count; i++) + computed_streams[i] = false; + + for (i = 0; i < dc_state->stream_count; i++) { + stream = dc_state->streams[i]; + res_pool = stream->ctx->dc->res_pool; + + if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) + continue; + + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + + if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port) + continue; + + if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) + continue; + + if (computed_streams[i]) + continue; + + if (res_pool->funcs->remove_stream_from_ctx && + res_pool->funcs->remove_stream_from_ctx(stream->ctx->dc, dc_state, stream) != DC_OK) + return -EINVAL; + + if (!is_dsc_need_re_compute(state, dc_state, stream->link)) + continue; + + mst_mgr = aconnector->mst_output_port->mgr; + ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr, + &link_vars_start_index); + if (ret != 0) + return ret; + + for (j = 0; j < dc_state->stream_count; j++) { + if 
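+	/*
+	 * Clarifying note: compute_mst_dsc_configs_for_link() solves the whole
+	 * physical link in one fairness pass, so every other stream that
+	 * shares this link is marked as computed here and skipped on later
+	 * iterations rather than being recomputed per stream.
+	 */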
(dc_state->streams[j]->link == stream->link) + computed_streams[j] = true; + } + } + + for (i = 0; i < dc_state->stream_count; i++) { + stream = dc_state->streams[i]; + + if (stream->timing.flags.DSC == 1) + if (dc_stream_add_dsc_to_resource(stream->ctx->dc, dc_state, stream) != DC_OK) + return -EINVAL; + } + + return ret; +} + +static int pre_compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars) +{ + int i, j; + struct dc_stream_state *stream; + bool computed_streams[MAX_PIPES]; + struct amdgpu_dm_connector *aconnector; + struct drm_dp_mst_topology_mgr *mst_mgr; + int link_vars_start_index = 0; + int ret = 0; + + for (i = 0; i < dc_state->stream_count; i++) + computed_streams[i] = false; + + for (i = 0; i < dc_state->stream_count; i++) { + stream = dc_state->streams[i]; + + if (stream->signal != SIGNAL_TYPE_DISPLAY_PORT_MST) + continue; + + aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context; + + if (!aconnector || !aconnector->dc_sink || !aconnector->mst_output_port) + continue; + + if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported) + continue; + + if (computed_streams[i]) + continue; + + if (!is_dsc_need_re_compute(state, dc_state, stream->link)) + continue; + + mst_mgr = aconnector->mst_output_port->mgr; + ret = compute_mst_dsc_configs_for_link(state, dc_state, stream->link, vars, mst_mgr, + &link_vars_start_index); + if (ret != 0) + return ret; + + for (j = 0; j < dc_state->stream_count; j++) { + if (dc_state->streams[j]->link == stream->link) + computed_streams[j] = true; + } + } + + return ret; +} + +static int find_crtc_index_in_state_by_stream(struct drm_atomic_state *state, + struct dc_stream_state *stream) +{ + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *new_state, *old_state; + + for_each_oldnew_crtc_in_state(state, crtc, old_state, new_state, i) { + struct dm_crtc_state *dm_state = to_dm_crtc_state(new_state); + + if (dm_state->stream == stream) + return i; + } + return -1; +} + +static bool is_link_to_dschub(struct dc_link *dc_link) +{ + union dpcd_dsc_basic_capabilities *dsc_caps = + &dc_link->dpcd_caps.dsc_caps.dsc_basic_caps; + + /* only check phy used by dsc mst branch */ + if (dc_link->type != dc_connection_mst_branch) + return false; + + if (!(dsc_caps->fields.dsc_support.DSC_SUPPORT || + dsc_caps->fields.dsc_support.DSC_PASSTHROUGH_SUPPORT)) + return false; + return true; +} + +static bool is_dsc_precompute_needed(struct drm_atomic_state *state) +{ + int i; + struct drm_crtc *crtc; + struct drm_crtc_state *old_crtc_state, *new_crtc_state; + bool ret = false; + + for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) { + struct dm_crtc_state *dm_crtc_state = to_dm_crtc_state(new_crtc_state); + + if (!amdgpu_dm_find_first_crtc_matching_connector(state, crtc)) { + ret = false; + break; + } + if (dm_crtc_state->stream && dm_crtc_state->stream->link) + if (is_link_to_dschub(dm_crtc_state->stream->link)) + ret = true; + } + return ret; +} + +int pre_validate_dsc(struct drm_atomic_state *state, + struct dm_atomic_state **dm_state_ptr, + struct dsc_mst_fairness_vars *vars) +{ + int i; + struct dm_atomic_state *dm_state; + struct dc_state *local_dc_state = NULL; + int ret = 0; + + if (!is_dsc_precompute_needed(state)) { + DRM_INFO_ONCE("DSC precompute is not needed.\n"); + return 0; + } + ret = dm_atomic_get_state(state, dm_state_ptr); + if (ret != 0) { + DRM_INFO_ONCE("dm_atomic_get_state() failed\n"); + return ret; + } + dm_state = 
*dm_state_ptr; + + /* + * create a local variable for dc_state: copy the contents of the streams of + * dm_state->context into it, making sure the stream pointers of the local copy + * are not the same as the streams in dm_state->context. + */ + + local_dc_state = kmemdup(dm_state->context, sizeof(struct dc_state), GFP_KERNEL); + if (!local_dc_state) + return -ENOMEM; + + for (i = 0; i < local_dc_state->stream_count; i++) { + struct dc_stream_state *stream = dm_state->context->streams[i]; + int ind = find_crtc_index_in_state_by_stream(state, stream); + + if (ind >= 0) { + struct amdgpu_dm_connector *aconnector; + struct drm_connector_state *drm_new_conn_state; + struct dm_connector_state *dm_new_conn_state; + struct dm_crtc_state *dm_old_crtc_state; + + aconnector = + amdgpu_dm_find_first_crtc_matching_connector(state, + state->crtcs[ind].ptr); + drm_new_conn_state = + drm_atomic_get_new_connector_state(state, + &aconnector->base); + dm_new_conn_state = to_dm_connector_state(drm_new_conn_state); + dm_old_crtc_state = to_dm_crtc_state(state->crtcs[ind].old_state); + + local_dc_state->streams[i] = + create_validate_stream_for_sink(aconnector, + &state->crtcs[ind].new_state->mode, + dm_new_conn_state, + dm_old_crtc_state->stream); + if (local_dc_state->streams[i] == NULL) { + ret = -EINVAL; + break; + } + } + } + + if (ret != 0) + goto clean_exit; + + ret = pre_compute_mst_dsc_configs_for_state(state, local_dc_state, vars); + if (ret != 0) { + DRM_INFO_ONCE("pre_compute_mst_dsc_configs_for_state() failed\n"); + ret = -EINVAL; + goto clean_exit; + } + + /* + * compare the timing of each local_dc_state stream with dm_state->context; + * if it is unchanged, clear crtc_state->mode_changed. + */ + for (i = 0; i < local_dc_state->stream_count; i++) { + struct dc_stream_state *stream = dm_state->context->streams[i]; + + if (local_dc_state->streams[i] && + dc_is_timing_changed(stream, local_dc_state->streams[i])) { + DRM_INFO_ONCE("crtc[%d] needs mode_changed\n", i); + } else { + int ind = find_crtc_index_in_state_by_stream(state, stream); + + if (ind >= 0) + state->crtcs[ind].new_state->mode_changed = 0; + } + } +clean_exit: + for (i = 0; i < local_dc_state->stream_count; i++) { + struct dc_stream_state *stream = dm_state->context->streams[i]; + + if (local_dc_state->streams[i] != stream) + dc_stream_release(local_dc_state->streams[i]); + } + + kfree(local_dc_state); + + return ret; +} + +static unsigned int kbps_from_pbn(unsigned int pbn) +{ + unsigned int kbps = pbn; + + kbps *= (1000000 / PEAK_FACTOR_X1000); + kbps *= 8; + kbps *= 54; + kbps /= 64; + + return kbps; +} + +static bool is_dsc_common_config_possible(struct dc_stream_state *stream, + struct dc_dsc_bw_range *bw_range) +{ + struct dc_dsc_policy dsc_policy = {0}; + + dc_dsc_get_policy_for_timing(&stream->timing, 0, &dsc_policy); + dc_dsc_compute_bandwidth_range(stream->sink->ctx->dc->res_pool->dscs[0], + stream->sink->ctx->dc->debug.dsc_min_slice_height_override, + dsc_policy.min_target_bpp * 16, + dsc_policy.max_target_bpp * 16, + &stream->sink->dsc_caps.dsc_dec_caps, + &stream->timing, dc_link_get_highest_encoding_format(stream->link), bw_range); + + return bw_range->max_target_bpp_x16 && bw_range->min_target_bpp_x16; +} + +enum dc_status dm_dp_mst_is_port_support_mode( + struct amdgpu_dm_connector *aconnector, + struct dc_stream_state *stream) +{ + int bpp, pbn, branch_max_throughput_mps = 0; + struct dc_link_settings cur_link_settings; + unsigned int end_to_end_bw_in_kbps = 0; + unsigned int upper_link_bw_in_kbps = 0, down_link_bw_in_kbps = 0; + unsigned int
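+	/*
+	 * Worked example for kbps_from_pbn() above (illustrative): with
+	 * PEAK_FACTOR_X1000 == 1006, 154 PBN converts back as
+	 *   154 * (1,000,000 / 1006) * 8 * 54 / 64 = 1,033,263 kbps,
+	 * i.e. roughly the 1 Gbps that kbps_to_peak_pbn() mapped to 154 PBN,
+	 * less integer truncation. The two helpers are near-inverses.
+	 */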
max_compressed_bw_in_kbps = 0; + struct dc_dsc_bw_range bw_range = {0}; + uint16_t full_pbn = aconnector->mst_output_port->full_pbn; + + /* + * Consider the case with the depth of the mst topology tree is equal or less than 2 + * A. When dsc bitstream can be transmitted along the entire path + * 1. dsc is possible between source and branch/leaf device (common dsc params is possible), AND + * 2. dsc passthrough supported at MST branch, or + * 3. dsc decoding supported at leaf MST device + * Use maximum dsc compression as bw constraint + * B. When dsc bitstream cannot be transmitted along the entire path + * Use native bw as bw constraint + */ + if (is_dsc_common_config_possible(stream, &bw_range) && + (aconnector->mst_output_port->passthrough_aux || + aconnector->dsc_aux == &aconnector->mst_output_port->aux)) { + cur_link_settings = stream->link->verified_link_cap; + + upper_link_bw_in_kbps = dc_link_bandwidth_kbps(aconnector->dc_link, + &cur_link_settings); + down_link_bw_in_kbps = kbps_from_pbn(full_pbn); + + /* pick the bottleneck */ + end_to_end_bw_in_kbps = min(upper_link_bw_in_kbps, + down_link_bw_in_kbps); + + /* + * use the maximum dsc compression bandwidth as the required + * bandwidth for the mode + */ + max_compressed_bw_in_kbps = bw_range.min_kbps; + + if (end_to_end_bw_in_kbps < max_compressed_bw_in_kbps) { + DRM_DEBUG_DRIVER("Mode does not fit into DSC pass-through bandwidth validation\n"); + return DC_FAIL_BANDWIDTH_VALIDATE; + } + } else { + /* check if mode could be supported within full_pbn */ + bpp = convert_dc_color_depth_into_bpc(stream->timing.display_color_depth) * 3; + pbn = drm_dp_calc_pbn_mode(stream->timing.pix_clk_100hz / 10, bpp << 4); + if (pbn > full_pbn) + return DC_FAIL_BANDWIDTH_VALIDATE; + } + + /* check is mst dsc output bandwidth branch_overall_throughput_0_mps */ + switch (stream->timing.pixel_encoding) { + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + branch_max_throughput_mps = + aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_0_mps; + break; + case PIXEL_ENCODING_YCBCR422: + case PIXEL_ENCODING_YCBCR420: + branch_max_throughput_mps = + aconnector->dc_sink->dsc_caps.dsc_dec_caps.branch_overall_throughput_1_mps; + break; + default: + break; + } + + if (branch_max_throughput_mps != 0 && + ((stream->timing.pix_clk_100hz / 10) > branch_max_throughput_mps * 1000)) + return DC_FAIL_BANDWIDTH_VALIDATE; + + return DC_OK; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h new file mode 100644 index 0000000000..37c820ab0f --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -0,0 +1,96 @@ +/* + * Copyright 2012-15 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef __DAL_AMDGPU_DM_MST_TYPES_H__ +#define __DAL_AMDGPU_DM_MST_TYPES_H__ + +#define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24 + +#define SYNAPTICS_RC_COMMAND 0x4B2 +#define SYNAPTICS_RC_RESULT 0x4B3 +#define SYNAPTICS_RC_LENGTH 0x4B8 +#define SYNAPTICS_RC_OFFSET 0x4BC +#define SYNAPTICS_RC_DATA 0x4C0 + +#define DP_BRANCH_VENDOR_SPECIFIC_START 0x50C + +/** + * Panamera MST Hub detection + * Offset DPCD 050Eh == 0x5A indicates cascaded MST hub case + * Check from beginning of branch device vendor specific field (050Ch) + */ +#define IS_SYNAPTICS_PANAMERA(branchDevName) (((int)branchDevName[4] & 0xF0) == 0x50 ? 1 : 0) +#define BRANCH_HW_REVISION_PANAMERA_A2 0x10 +#define SYNAPTICS_CASCADED_HUB_ID 0x5A +#define IS_SYNAPTICS_CASCADED_PANAMERA(devName, data) ((IS_SYNAPTICS_PANAMERA(devName) && ((int)data[2] == SYNAPTICS_CASCADED_HUB_ID)) ? 1 : 0) + +#define PBN_FEC_OVERHEAD_MULTIPLIER_8B_10B 1031 +#define PBN_FEC_OVERHEAD_MULTIPLIER_128B_132B 1000 + +enum mst_msg_ready_type { + NONE_MSG_RDY_EVENT = 0, + DOWN_REP_MSG_RDY_EVENT = 1, + UP_REQ_MSG_RDY_EVENT = 2, + DOWN_OR_UP_MSG_RDY_EVENT = 3 +}; + +struct amdgpu_display_manager; +struct amdgpu_dm_connector; + +int dm_mst_get_pbn_divider(struct dc_link *link); + +void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm, + struct amdgpu_dm_connector *aconnector, + int link_index); + +void +dm_dp_create_fake_mst_encoders(struct amdgpu_device *adev); + +void dm_handle_mst_sideband_msg_ready_event( + struct drm_dp_mst_topology_mgr *mgr, + enum mst_msg_ready_type msg_rdy_type); + +struct dsc_mst_fairness_vars { + int pbn; + bool dsc_enabled; + int bpp_x16; + struct amdgpu_dm_connector *aconnector; +}; + +int compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, + struct dc_state *dc_state, + struct dsc_mst_fairness_vars *vars); + +bool needs_dsc_aux_workaround(struct dc_link *link); + +int pre_validate_dsc(struct drm_atomic_state *state, + struct dm_atomic_state **dm_state_ptr, + struct dsc_mst_fairness_vars *vars); + +enum dc_status dm_dp_mst_is_port_support_mode( + struct amdgpu_dm_connector *aconnector, + struct dc_stream_state *stream); + +#endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c new file mode 100644 index 0000000000..cc74dd69ac --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c @@ -0,0 +1,1534 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include <drm/drm_atomic_helper.h> +#include <drm/drm_blend.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_plane_helper.h> +#include <drm/drm_fourcc.h> + +#include "amdgpu.h" +#include "dal_asic_id.h" +#include "amdgpu_display.h" +#include "amdgpu_dm_trace.h" +#include "amdgpu_dm_plane.h" +#include "gc/gc_11_0_0_offset.h" +#include "gc/gc_11_0_0_sh_mask.h" + +/* + * TODO: these are currently initialized to rgb formats only. + * For future use cases we should either initialize them dynamically based on + * plane capabilities, or initialize this array to all formats, so internal drm + * check will succeed, and let DC implement proper check + */ +static const uint32_t rgb_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_XRGB2101010, + DRM_FORMAT_XBGR2101010, + DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_XRGB16161616, + DRM_FORMAT_XBGR16161616, + DRM_FORMAT_ARGB16161616, + DRM_FORMAT_ABGR16161616, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGB565, +}; + +static const uint32_t overlay_formats[] = { + DRM_FORMAT_XRGB8888, + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_ABGR8888, + DRM_FORMAT_RGB565, + DRM_FORMAT_NV21, + DRM_FORMAT_NV12, + DRM_FORMAT_P010 +}; + +static const uint32_t video_formats[] = { + DRM_FORMAT_NV21, + DRM_FORMAT_NV12, + DRM_FORMAT_P010 +}; + +static const u32 cursor_formats[] = { + DRM_FORMAT_ARGB8888 +}; + +enum dm_micro_swizzle { + MICRO_SWIZZLE_Z = 0, + MICRO_SWIZZLE_S = 1, + MICRO_SWIZZLE_D = 2, + MICRO_SWIZZLE_R = 3 +}; + +const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd) +{ + return amdgpu_lookup_format_info(cmd->pixel_format, cmd->modifier[0]); +} + +void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state, + bool *per_pixel_alpha, bool *pre_multiplied_alpha, + bool *global_alpha, int *global_alpha_value) +{ + *per_pixel_alpha = false; + *pre_multiplied_alpha = true; + *global_alpha = false; + *global_alpha_value = 0xff; + + if (plane_state->plane->type != DRM_PLANE_TYPE_OVERLAY) + return; + + if (plane_state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI || + plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) { + static const uint32_t alpha_formats[] = { + DRM_FORMAT_ARGB8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_ABGR8888, + 
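+			/*
+			 * Illustrative example (no new logic): an ARGB8888
+			 * overlay with pixel_blend_mode ==
+			 * DRM_MODE_BLEND_COVERAGE matches this list, so
+			 * per_pixel_alpha = true and pre_multiplied_alpha =
+			 * false; a plane alpha of 0x8080 additionally sets
+			 * global_alpha_value = 0x8080 >> 8 = 0x80 (~50%).
+			 */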
DRM_FORMAT_ARGB2101010, + DRM_FORMAT_ABGR2101010, + DRM_FORMAT_ARGB16161616, + DRM_FORMAT_ABGR16161616, + DRM_FORMAT_ARGB16161616F, + }; + uint32_t format = plane_state->fb->format->format; + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(alpha_formats); ++i) { + if (format == alpha_formats[i]) { + *per_pixel_alpha = true; + break; + } + } + + if (*per_pixel_alpha && plane_state->pixel_blend_mode == DRM_MODE_BLEND_COVERAGE) + *pre_multiplied_alpha = false; + } + + if (plane_state->alpha < 0xffff) { + *global_alpha = true; + *global_alpha_value = plane_state->alpha >> 8; + } +} + +static void add_modifier(uint64_t **mods, uint64_t *size, uint64_t *cap, uint64_t mod) +{ + if (!*mods) + return; + + if (*cap - *size < 1) { + uint64_t new_cap = *cap * 2; + uint64_t *new_mods = kmalloc(new_cap * sizeof(uint64_t), GFP_KERNEL); + + if (!new_mods) { + kfree(*mods); + *mods = NULL; + return; + } + + memcpy(new_mods, *mods, sizeof(uint64_t) * *size); + kfree(*mods); + *mods = new_mods; + *cap = new_cap; + } + + (*mods)[*size] = mod; + *size += 1; +} + +static bool modifier_has_dcc(uint64_t modifier) +{ + return IS_AMD_FMT_MOD(modifier) && AMD_FMT_MOD_GET(DCC, modifier); +} + +static unsigned int modifier_gfx9_swizzle_mode(uint64_t modifier) +{ + if (modifier == DRM_FORMAT_MOD_LINEAR) + return 0; + + return AMD_FMT_MOD_GET(TILE, modifier); +} + +static void fill_gfx8_tiling_info_from_flags(union dc_tiling_info *tiling_info, + uint64_t tiling_flags) +{ + /* Fill GFX8 params */ + if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) == DC_ARRAY_2D_TILED_THIN1) { + unsigned int bankw, bankh, mtaspect, tile_split, num_banks; + + bankw = AMDGPU_TILING_GET(tiling_flags, BANK_WIDTH); + bankh = AMDGPU_TILING_GET(tiling_flags, BANK_HEIGHT); + mtaspect = AMDGPU_TILING_GET(tiling_flags, MACRO_TILE_ASPECT); + tile_split = AMDGPU_TILING_GET(tiling_flags, TILE_SPLIT); + num_banks = AMDGPU_TILING_GET(tiling_flags, NUM_BANKS); + + /* XXX fix me for VI */ + tiling_info->gfx8.num_banks = num_banks; + tiling_info->gfx8.array_mode = + DC_ARRAY_2D_TILED_THIN1; + tiling_info->gfx8.tile_split = tile_split; + tiling_info->gfx8.bank_width = bankw; + tiling_info->gfx8.bank_height = bankh; + tiling_info->gfx8.tile_aspect = mtaspect; + tiling_info->gfx8.tile_mode = + DC_ADDR_SURF_MICRO_TILING_DISPLAY; + } else if (AMDGPU_TILING_GET(tiling_flags, ARRAY_MODE) + == DC_ARRAY_1D_TILED_THIN1) { + tiling_info->gfx8.array_mode = DC_ARRAY_1D_TILED_THIN1; + } + + tiling_info->gfx8.pipe_config = + AMDGPU_TILING_GET(tiling_flags, PIPE_CONFIG); +} + +static void fill_gfx9_tiling_info_from_device(const struct amdgpu_device *adev, + union dc_tiling_info *tiling_info) +{ + /* Fill GFX9 params */ + tiling_info->gfx9.num_pipes = + adev->gfx.config.gb_addr_config_fields.num_pipes; + tiling_info->gfx9.num_banks = + adev->gfx.config.gb_addr_config_fields.num_banks; + tiling_info->gfx9.pipe_interleave = + adev->gfx.config.gb_addr_config_fields.pipe_interleave_size; + tiling_info->gfx9.num_shader_engines = + adev->gfx.config.gb_addr_config_fields.num_se; + tiling_info->gfx9.max_compressed_frags = + adev->gfx.config.gb_addr_config_fields.max_compress_frags; + tiling_info->gfx9.num_rb_per_se = + adev->gfx.config.gb_addr_config_fields.num_rb_per_se; + tiling_info->gfx9.shaderEnable = 1; + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) + tiling_info->gfx9.num_pkrs = adev->gfx.config.gb_addr_config_fields.num_pkrs; +} + +static void fill_gfx9_tiling_info_from_modifier(const struct amdgpu_device *adev, + union dc_tiling_info *tiling_info, + uint64_t 
modifier) +{ + unsigned int mod_bank_xor_bits = AMD_FMT_MOD_GET(BANK_XOR_BITS, modifier); + unsigned int mod_pipe_xor_bits = AMD_FMT_MOD_GET(PIPE_XOR_BITS, modifier); + unsigned int pkrs_log2 = AMD_FMT_MOD_GET(PACKERS, modifier); + unsigned int pipes_log2; + + pipes_log2 = min(5u, mod_pipe_xor_bits); + + fill_gfx9_tiling_info_from_device(adev, tiling_info); + + if (!IS_AMD_FMT_MOD(modifier)) + return; + + tiling_info->gfx9.num_pipes = 1u << pipes_log2; + tiling_info->gfx9.num_shader_engines = 1u << (mod_pipe_xor_bits - pipes_log2); + + if (adev->family >= AMDGPU_FAMILY_NV) { + tiling_info->gfx9.num_pkrs = 1u << pkrs_log2; + } else { + tiling_info->gfx9.num_banks = 1u << mod_bank_xor_bits; + + /* for DCC we know it isn't rb aligned, so rb_per_se doesn't matter. */ + } +} + +static int validate_dcc(struct amdgpu_device *adev, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const union dc_tiling_info *tiling_info, + const struct dc_plane_dcc_param *dcc, + const struct dc_plane_address *address, + const struct plane_size *plane_size) +{ + struct dc *dc = adev->dm.dc; + struct dc_dcc_surface_param input; + struct dc_surface_dcc_cap output; + + memset(&input, 0, sizeof(input)); + memset(&output, 0, sizeof(output)); + + if (!dcc->enable) + return 0; + + if (format >= SURFACE_PIXEL_FORMAT_VIDEO_BEGIN || + !dc->cap_funcs.get_dcc_compression_cap) + return -EINVAL; + + input.format = format; + input.surface_size.width = plane_size->surface_size.width; + input.surface_size.height = plane_size->surface_size.height; + input.swizzle_mode = tiling_info->gfx9.swizzle; + + if (rotation == ROTATION_ANGLE_0 || rotation == ROTATION_ANGLE_180) + input.scan = SCAN_DIRECTION_HORIZONTAL; + else if (rotation == ROTATION_ANGLE_90 || rotation == ROTATION_ANGLE_270) + input.scan = SCAN_DIRECTION_VERTICAL; + + if (!dc->cap_funcs.get_dcc_compression_cap(dc, &input, &output)) + return -EINVAL; + + if (!output.capable) + return -EINVAL; + + if (dcc->independent_64b_blks == 0 && + output.grph.rgb.independent_64b_blks != 0) + return -EINVAL; + + return 0; +} + +static int fill_gfx9_plane_attributes_from_modifiers(struct amdgpu_device *adev, + const struct amdgpu_framebuffer *afb, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const struct plane_size *plane_size, + union dc_tiling_info *tiling_info, + struct dc_plane_dcc_param *dcc, + struct dc_plane_address *address, + const bool force_disable_dcc) +{ + const uint64_t modifier = afb->base.modifier; + int ret = 0; + + fill_gfx9_tiling_info_from_modifier(adev, tiling_info, modifier); + tiling_info->gfx9.swizzle = modifier_gfx9_swizzle_mode(modifier); + + if (modifier_has_dcc(modifier) && !force_disable_dcc) { + uint64_t dcc_address = afb->address + afb->base.offsets[1]; + bool independent_64b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_64B, modifier); + bool independent_128b_blks = AMD_FMT_MOD_GET(DCC_INDEPENDENT_128B, modifier); + + dcc->enable = 1; + dcc->meta_pitch = afb->base.pitches[1]; + dcc->independent_64b_blks = independent_64b_blks; + if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) >= AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) { + if (independent_64b_blks && independent_128b_blks) + dcc->dcc_ind_blk = hubp_ind_block_64b_no_128bcl; + else if (independent_128b_blks) + dcc->dcc_ind_blk = hubp_ind_block_128b; + else if (independent_64b_blks && !independent_128b_blks) + dcc->dcc_ind_blk = hubp_ind_block_64b; + else + dcc->dcc_ind_blk = hubp_ind_block_unconstrained; + } else { + if (independent_64b_blks) + 
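+		/*
+		 * Summary of the independent-block selection here (restating
+		 * the code, no new behavior). For GFX10_RBPLUS and newer:
+		 *   64B && 128B -> hubp_ind_block_64b_no_128bcl
+		 *   128B only   -> hubp_ind_block_128b
+		 *   64B only    -> hubp_ind_block_64b
+		 *   neither     -> hubp_ind_block_unconstrained
+		 * Older tiling versions only distinguish 64B vs. unconstrained.
+		 */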
dcc->dcc_ind_blk = hubp_ind_block_64b; + else + dcc->dcc_ind_blk = hubp_ind_block_unconstrained; + } + + address->grph.meta_addr.low_part = lower_32_bits(dcc_address); + address->grph.meta_addr.high_part = upper_32_bits(dcc_address); + } + + ret = validate_dcc(adev, format, rotation, tiling_info, dcc, address, plane_size); + if (ret) + drm_dbg_kms(adev_to_drm(adev), "validate_dcc: returned error: %d\n", ret); + + return ret; +} + +static void add_gfx10_1_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, uint64_t *size, uint64_t *capacity) +{ + int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits)); + + + /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); +} + +static void add_gfx9_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, uint64_t *size, uint64_t *capacity) +{ + int pipes = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); + int pipe_xor_bits = min(8, pipes + + ilog2(adev->gfx.config.gb_addr_config_fields.num_se)); + int bank_xor_bits = min(8 - pipe_xor_bits, + ilog2(adev->gfx.config.gb_addr_config_fields.num_banks)); + int rb = ilog2(adev->gfx.config.gb_addr_config_fields.num_se) + + ilog2(adev->gfx.config.gb_addr_config_fields.num_rb_per_se); + + + if (adev->family == AMDGPU_FAMILY_RV) { + /* Raven2 and later */ + bool has_constant_encode = adev->asic_type > CHIP_RAVEN || adev->external_rev_id >= 0x81; + + /* + * No _D DCC swizzles yet because we only allow 32bpp, which + * doesn't support _D on DCN + */ + + if (has_constant_encode) { + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + 
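+			/*
+			 * Illustrative sizing (hypothetical part, not a real
+			 * config): with 4 pipes, 1 shader engine and 8 banks,
+			 * pipe_xor_bits = min(8, 2 + 0) = 2 and
+			 * bank_xor_bits = min(8 - 2, 3) = 3, so the modifiers
+			 * built here encode 2 pipe-xor and 3 bank-xor bits.
+			 */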
AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1)); + } + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0)); + + if (has_constant_encode) { + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(RB, rb) | + AMD_FMT_MOD_SET(PIPE, pipes)); + } + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 0) | + AMD_FMT_MOD_SET(RB, rb) | + AMD_FMT_MOD_SET(PIPE, pipes)); + } + + /* + * Only supported for 64bpp on Raven, will be filtered on format in + * dm_plane_format_mod_supported. + */ + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); + + if (adev->family == AMDGPU_FAMILY_RV) { + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(BANK_XOR_BITS, bank_xor_bits)); + } + + /* + * Only supported for 64bpp on Raven, will be filtered on format in + * dm_plane_format_mod_supported. 
+ */ + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + + if (adev->family == AMDGPU_FAMILY_RV) { + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + } +} + +static void add_gfx10_3_modifiers(const struct amdgpu_device *adev, + uint64_t **mods, uint64_t *size, uint64_t *capacity) +{ + int pipe_xor_bits = ilog2(adev->gfx.config.gb_addr_config_fields.num_pipes); + int pkrs = ilog2(adev->gfx.config.gb_addr_config_fields.num_pkrs); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs) | + AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_RETILE, 1) | + AMD_FMT_MOD_SET(DCC_CONSTANT_ENCODE, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_R_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S_X) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX10_RBPLUS) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(PACKERS, pkrs)); + + /* Only supported for 64bpp, will be filtered in dm_plane_format_mod_supported */ + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D) | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_S) | + 
AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX9)); +} + +static void add_gfx11_modifiers(struct amdgpu_device *adev, + uint64_t **mods, uint64_t *size, uint64_t *capacity) +{ + int num_pipes = 0; + int pipe_xor_bits = 0; + int num_pkrs = 0; + int pkrs = 0; + u32 gb_addr_config; + u8 i = 0; + unsigned int swizzle_r_x; + uint64_t modifier_r_x; + uint64_t modifier_dcc_best; + uint64_t modifier_dcc_4k; + + /* TODO: GFX11 IP HW init hasn't finished and we get zero if we read from + * adev->gfx.config.gb_addr_config_fields.num_{pkrs,pipes} + */ + gb_addr_config = RREG32_SOC15(GC, 0, regGB_ADDR_CONFIG); + ASSERT(gb_addr_config != 0); + + num_pkrs = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PKRS); + pkrs = ilog2(num_pkrs); + num_pipes = 1 << REG_GET_FIELD(gb_addr_config, GB_ADDR_CONFIG, NUM_PIPES); + pipe_xor_bits = ilog2(num_pipes); + + for (i = 0; i < 2; i++) { + /* Insert the best one first. */ + /* R_X swizzle modes are the best for rendering and DCC requires them. */ + if (num_pipes > 16) + swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX11_256K_R_X : AMD_FMT_MOD_TILE_GFX9_64K_R_X; + else + swizzle_r_x = !i ? AMD_FMT_MOD_TILE_GFX9_64K_R_X : AMD_FMT_MOD_TILE_GFX11_256K_R_X; + + modifier_r_x = AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | + AMD_FMT_MOD_SET(PIPE_XOR_BITS, pipe_xor_bits) | + AMD_FMT_MOD_SET(TILE, swizzle_r_x) | + AMD_FMT_MOD_SET(PACKERS, pkrs); + + /* DCC_CONSTANT_ENCODE is not set because it can't vary with gfx11 (it's implied to be 1). */ + modifier_dcc_best = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 0) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_128B); + + /* DCC settings for 4K and greater resolutions. (required by display hw) */ + modifier_dcc_4k = modifier_r_x | AMD_FMT_MOD_SET(DCC, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_64B, 1) | + AMD_FMT_MOD_SET(DCC_INDEPENDENT_128B, 1) | + AMD_FMT_MOD_SET(DCC_MAX_COMPRESSED_BLOCK, AMD_FMT_MOD_DCC_BLOCK_64B); + + add_modifier(mods, size, capacity, modifier_dcc_best); + add_modifier(mods, size, capacity, modifier_dcc_4k); + + add_modifier(mods, size, capacity, modifier_dcc_best | AMD_FMT_MOD_SET(DCC_RETILE, 1)); + add_modifier(mods, size, capacity, modifier_dcc_4k | AMD_FMT_MOD_SET(DCC_RETILE, 1)); + + add_modifier(mods, size, capacity, modifier_r_x); + } + + add_modifier(mods, size, capacity, AMD_FMT_MOD | + AMD_FMT_MOD_SET(TILE_VERSION, AMD_FMT_MOD_TILE_VER_GFX11) | + AMD_FMT_MOD_SET(TILE, AMD_FMT_MOD_TILE_GFX9_64K_D)); +} + +static int get_plane_modifiers(struct amdgpu_device *adev, unsigned int plane_type, uint64_t **mods) +{ + uint64_t size = 0, capacity = 128; + *mods = NULL; + + /* We have not hooked up any pre-GFX9 modifiers. */ + if (adev->family < AMDGPU_FAMILY_AI) + return 0; + + *mods = kmalloc(capacity * sizeof(uint64_t), GFP_KERNEL); + + if (plane_type == DRM_PLANE_TYPE_CURSOR) { + add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); + add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); + return *mods ?
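+		/*
+		 * Note (restating the convention used here): the returned
+		 * array is handed to the drm plane init path and must be
+		 * terminated with DRM_FORMAT_MOD_INVALID. Cursor planes only
+		 * advertise LINEAR, while the family branches below append
+		 * their swizzle/DCC modifiers first, then LINEAR, then the
+		 * INVALID terminator.
+		 */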
0 : -ENOMEM; + } + + switch (adev->family) { + case AMDGPU_FAMILY_AI: + case AMDGPU_FAMILY_RV: + add_gfx9_modifiers(adev, mods, &size, &capacity); + break; + case AMDGPU_FAMILY_NV: + case AMDGPU_FAMILY_VGH: + case AMDGPU_FAMILY_YC: + case AMDGPU_FAMILY_GC_10_3_6: + case AMDGPU_FAMILY_GC_10_3_7: + if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) + add_gfx10_3_modifiers(adev, mods, &size, &capacity); + else + add_gfx10_1_modifiers(adev, mods, &size, &capacity); + break; + case AMDGPU_FAMILY_GC_11_0_0: + case AMDGPU_FAMILY_GC_11_0_1: + add_gfx11_modifiers(adev, mods, &size, &capacity); + break; + } + + add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_LINEAR); + + /* INVALID marks the end of the list. */ + add_modifier(mods, &size, &capacity, DRM_FORMAT_MOD_INVALID); + + if (!*mods) + return -ENOMEM; + + return 0; +} + +static int get_plane_formats(const struct drm_plane *plane, + const struct dc_plane_cap *plane_cap, + uint32_t *formats, int max_formats) +{ + int i, num_formats = 0; + + /* + * TODO: Query support for each group of formats directly from + * DC plane caps. This will require adding more formats to the + * caps list. + */ + + if (plane->type == DRM_PLANE_TYPE_PRIMARY || + (plane_cap && plane_cap->type == DC_PLANE_TYPE_DCN_UNIVERSAL && plane->type != DRM_PLANE_TYPE_CURSOR)) { + for (i = 0; i < ARRAY_SIZE(rgb_formats); ++i) { + if (num_formats >= max_formats) + break; + + formats[num_formats++] = rgb_formats[i]; + } + + if (plane_cap && plane_cap->pixel_format_support.nv12) + formats[num_formats++] = DRM_FORMAT_NV12; + if (plane_cap && plane_cap->pixel_format_support.p010) + formats[num_formats++] = DRM_FORMAT_P010; + if (plane_cap && plane_cap->pixel_format_support.fp16) { + formats[num_formats++] = DRM_FORMAT_XRGB16161616F; + formats[num_formats++] = DRM_FORMAT_ARGB16161616F; + formats[num_formats++] = DRM_FORMAT_XBGR16161616F; + formats[num_formats++] = DRM_FORMAT_ABGR16161616F; + } + } else { + switch (plane->type) { + case DRM_PLANE_TYPE_OVERLAY: + for (i = 0; i < ARRAY_SIZE(overlay_formats); ++i) { + if (num_formats >= max_formats) + break; + + formats[num_formats++] = overlay_formats[i]; + } + break; + + case DRM_PLANE_TYPE_CURSOR: + for (i = 0; i < ARRAY_SIZE(cursor_formats); ++i) { + if (num_formats >= max_formats) + break; + + formats[num_formats++] = cursor_formats[i]; + } + break; + + default: + break; + } + } + + return num_formats; +} + +int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev, + const struct amdgpu_framebuffer *afb, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const uint64_t tiling_flags, + union dc_tiling_info *tiling_info, + struct plane_size *plane_size, + struct dc_plane_dcc_param *dcc, + struct dc_plane_address *address, + bool tmz_surface, + bool force_disable_dcc) +{ + const struct drm_framebuffer *fb = &afb->base; + int ret; + + memset(tiling_info, 0, sizeof(*tiling_info)); + memset(plane_size, 0, sizeof(*plane_size)); + memset(dcc, 0, sizeof(*dcc)); + memset(address, 0, sizeof(*address)); + + address->tmz_surface = tmz_surface; + + if (format < SURFACE_PIXEL_FORMAT_VIDEO_BEGIN) { + uint64_t addr = afb->address + fb->offsets[0]; + + plane_size->surface_size.x = 0; + plane_size->surface_size.y = 0; + plane_size->surface_size.width = fb->width; + plane_size->surface_size.height = fb->height; + plane_size->surface_pitch = + fb->pitches[0] / fb->format->cpp[0]; + + address->type = PLN_ADDR_TYPE_GRAPHICS; + address->grph.addr.low_part = lower_32_bits(addr); + 
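+		/*
+		 * Worked example for the video path below (illustrative): a
+		 * 1920x1080 NV12 framebuffer typically has pitches[0] = 1920
+		 * bytes of luma (cpp[0] = 1, so surface_pitch = 1920) and
+		 * pitches[1] = 1920 bytes of interleaved CbCr (cpp[1] = 2, so
+		 * chroma_pitch = 960), with chroma_size = 960x540 from the
+		 * width/2 and height/2 defaults used here.
+		 */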
address->grph.addr.high_part = upper_32_bits(addr); + } else if (format < SURFACE_PIXEL_FORMAT_INVALID) { + uint64_t luma_addr = afb->address + fb->offsets[0]; + uint64_t chroma_addr = afb->address + fb->offsets[1]; + + plane_size->surface_size.x = 0; + plane_size->surface_size.y = 0; + plane_size->surface_size.width = fb->width; + plane_size->surface_size.height = fb->height; + plane_size->surface_pitch = + fb->pitches[0] / fb->format->cpp[0]; + + plane_size->chroma_size.x = 0; + plane_size->chroma_size.y = 0; + /* TODO: set these based on surface format */ + plane_size->chroma_size.width = fb->width / 2; + plane_size->chroma_size.height = fb->height / 2; + + plane_size->chroma_pitch = + fb->pitches[1] / fb->format->cpp[1]; + + address->type = PLN_ADDR_TYPE_VIDEO_PROGRESSIVE; + address->video_progressive.luma_addr.low_part = + lower_32_bits(luma_addr); + address->video_progressive.luma_addr.high_part = + upper_32_bits(luma_addr); + address->video_progressive.chroma_addr.low_part = + lower_32_bits(chroma_addr); + address->video_progressive.chroma_addr.high_part = + upper_32_bits(chroma_addr); + } + + if (adev->family >= AMDGPU_FAMILY_AI) { + ret = fill_gfx9_plane_attributes_from_modifiers(adev, afb, format, + rotation, plane_size, + tiling_info, dcc, + address, + force_disable_dcc); + if (ret) + return ret; + } else { + fill_gfx8_tiling_info_from_flags(tiling_info, tiling_flags); + } + + return 0; +} + +static int dm_plane_helper_prepare_fb(struct drm_plane *plane, + struct drm_plane_state *new_state) +{ + struct amdgpu_framebuffer *afb; + struct drm_gem_object *obj; + struct amdgpu_device *adev; + struct amdgpu_bo *rbo; + struct dm_plane_state *dm_plane_state_new, *dm_plane_state_old; + uint32_t domain; + int r; + + if (!new_state->fb) { + DRM_DEBUG_KMS("No FB bound\n"); + return 0; + } + + afb = to_amdgpu_framebuffer(new_state->fb); + obj = new_state->fb->obj[0]; + rbo = gem_to_amdgpu_bo(obj); + adev = amdgpu_ttm_adev(rbo->tbo.bdev); + + r = amdgpu_bo_reserve(rbo, true); + if (r) { + dev_err(adev->dev, "fail to reserve bo (%d)\n", r); + return r; + } + + r = dma_resv_reserve_fences(rbo->tbo.base.resv, 1); + if (r) { + dev_err(adev->dev, "reserving fence slot failed (%d)\n", r); + goto error_unlock; + } + + if (plane->type != DRM_PLANE_TYPE_CURSOR) + domain = amdgpu_display_supported_domains(adev, rbo->flags); + else + domain = AMDGPU_GEM_DOMAIN_VRAM; + + r = amdgpu_bo_pin(rbo, domain); + if (unlikely(r != 0)) { + if (r != -ERESTARTSYS) + DRM_ERROR("Failed to pin framebuffer with error %d\n", r); + goto error_unlock; + } + + r = amdgpu_ttm_alloc_gart(&rbo->tbo); + if (unlikely(r != 0)) { + DRM_ERROR("%p bind failed\n", rbo); + goto error_unpin; + } + + r = drm_gem_plane_helper_prepare_fb(plane, new_state); + if (unlikely(r != 0)) + goto error_unpin; + + amdgpu_bo_unreserve(rbo); + + afb->address = amdgpu_bo_gpu_offset(rbo); + + amdgpu_bo_ref(rbo); + + /** + * We don't do surface updates on planes that have been newly created, + * but we also don't have the afb->address during atomic check. + * + * Fill in buffer attributes depending on the address here, but only on + * newly created planes since they're not being used by DC yet and this + * won't modify global state. 
+ */ + dm_plane_state_old = to_dm_plane_state(plane->state); + dm_plane_state_new = to_dm_plane_state(new_state); + + if (dm_plane_state_new->dc_state && + dm_plane_state_old->dc_state != dm_plane_state_new->dc_state) { + struct dc_plane_state *plane_state = + dm_plane_state_new->dc_state; + bool force_disable_dcc = !plane_state->dcc.enable; + + amdgpu_dm_plane_fill_plane_buffer_attributes( + adev, afb, plane_state->format, plane_state->rotation, + afb->tiling_flags, + &plane_state->tiling_info, &plane_state->plane_size, + &plane_state->dcc, &plane_state->address, + afb->tmz_surface, force_disable_dcc); + } + + return 0; + +error_unpin: + amdgpu_bo_unpin(rbo); + +error_unlock: + amdgpu_bo_unreserve(rbo); + return r; +} + +static void dm_plane_helper_cleanup_fb(struct drm_plane *plane, + struct drm_plane_state *old_state) +{ + struct amdgpu_bo *rbo; + int r; + + if (!old_state->fb) + return; + + rbo = gem_to_amdgpu_bo(old_state->fb->obj[0]); + r = amdgpu_bo_reserve(rbo, false); + if (unlikely(r)) { + DRM_ERROR("failed to reserve rbo before unpin\n"); + return; + } + + amdgpu_bo_unpin(rbo); + amdgpu_bo_unreserve(rbo); + amdgpu_bo_unref(&rbo); +} + +static void get_min_max_dc_plane_scaling(struct drm_device *dev, + struct drm_framebuffer *fb, + int *min_downscale, int *max_upscale) +{ + struct amdgpu_device *adev = drm_to_adev(dev); + struct dc *dc = adev->dm.dc; + /* Caps for all supported planes are the same on DCE and DCN 1 - 3 */ + struct dc_plane_cap *plane_cap = &dc->caps.planes[0]; + + switch (fb->format->format) { + case DRM_FORMAT_P010: + case DRM_FORMAT_NV12: + case DRM_FORMAT_NV21: + *max_upscale = plane_cap->max_upscale_factor.nv12; + *min_downscale = plane_cap->max_downscale_factor.nv12; + break; + + case DRM_FORMAT_XRGB16161616F: + case DRM_FORMAT_ARGB16161616F: + case DRM_FORMAT_XBGR16161616F: + case DRM_FORMAT_ABGR16161616F: + *max_upscale = plane_cap->max_upscale_factor.fp16; + *min_downscale = plane_cap->max_downscale_factor.fp16; + break; + + default: + *max_upscale = plane_cap->max_upscale_factor.argb8888; + *min_downscale = plane_cap->max_downscale_factor.argb8888; + break; + } + + /* + * A factor of 1 in the plane_cap means to not allow scaling, ie. use a + * scaling factor of 1.0 == 1000 units. + */ + if (*max_upscale == 1) + *max_upscale = 1000; + + if (*min_downscale == 1) + *min_downscale = 1000; +} + +int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state, + struct drm_crtc_state *new_crtc_state) +{ + struct drm_framebuffer *fb = state->fb; + int min_downscale, max_upscale; + int min_scale = 0; + int max_scale = INT_MAX; + + /* Plane enabled? Validate viewport and get scaling factors from plane caps. 
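 * A plane counts as enabled when it has both a framebuffer and a CRTC
 * attached; otherwise the permissive defaults above (0..INT_MAX) are kept.
 * For negative positions the viewport shrinks, e.g. crtc_x = -100 with
 * crtc_w = 500 leaves a 400 pixel wide viewport.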
*/ + if (fb && state->crtc) { + /* Validate viewport to cover the case when only the position changes */ + if (state->plane->type != DRM_PLANE_TYPE_CURSOR) { + int viewport_width = state->crtc_w; + int viewport_height = state->crtc_h; + + if (state->crtc_x < 0) + viewport_width += state->crtc_x; + else if (state->crtc_x + state->crtc_w > new_crtc_state->mode.crtc_hdisplay) + viewport_width = new_crtc_state->mode.crtc_hdisplay - state->crtc_x; + + if (state->crtc_y < 0) + viewport_height += state->crtc_y; + else if (state->crtc_y + state->crtc_h > new_crtc_state->mode.crtc_vdisplay) + viewport_height = new_crtc_state->mode.crtc_vdisplay - state->crtc_y; + + if (viewport_width < 0 || viewport_height < 0) { + DRM_DEBUG_ATOMIC("Plane completely outside of screen\n"); + return -EINVAL; + } else if (viewport_width < MIN_VIEWPORT_SIZE*2) { /* x2 for width is because of pipe-split. */ + DRM_DEBUG_ATOMIC("Viewport width %d smaller than %d\n", viewport_width, MIN_VIEWPORT_SIZE*2); + return -EINVAL; + } else if (viewport_height < MIN_VIEWPORT_SIZE) { + DRM_DEBUG_ATOMIC("Viewport height %d smaller than %d\n", viewport_height, MIN_VIEWPORT_SIZE); + return -EINVAL; + } + + } + + /* Get min/max allowed scaling factors from plane caps. */ + get_min_max_dc_plane_scaling(state->crtc->dev, fb, + &min_downscale, &max_upscale); + /* + * Convert to drm convention: 16.16 fixed point, instead of dc's + * 1.0 == 1000. Also drm scaling is src/dst instead of dc's + * dst/src, so min_scale = 1.0 / max_upscale, etc. + */ + min_scale = (1000 << 16) / max_upscale; + max_scale = (1000 << 16) / min_downscale; + } + + return drm_atomic_helper_check_plane_state( + state, new_crtc_state, min_scale, max_scale, true, true); +} + +int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev, + const struct drm_plane_state *state, + struct dc_scaling_info *scaling_info) +{ + int scale_w, scale_h, min_downscale, max_upscale; + + memset(scaling_info, 0, sizeof(*scaling_info)); + + /* Source is fixed 16.16 but we ignore mantissa for now... */ + scaling_info->src_rect.x = state->src_x >> 16; + scaling_info->src_rect.y = state->src_y >> 16; + + /* + * For reasons we don't (yet) fully understand a non-zero + * src_y coordinate into an NV12 buffer can cause a + * system hang on DCN1x. + * To avoid hangs (and maybe be overly cautious) + * let's reject both non-zero src_x and src_y. + * + * We currently know of only one use-case to reproduce a + * scenario with non-zero src_x and src_y for NV12, which + * is to gesture the YouTube Android app into full screen + * on ChromeOS. + */ + if (((adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 0)) || + (adev->ip_versions[DCE_HWIP][0] == IP_VERSION(1, 0, 1))) && + (state->fb && state->fb->format->format == DRM_FORMAT_NV12 && + (scaling_info->src_rect.x != 0 || scaling_info->src_rect.y != 0))) + return -EINVAL; + + scaling_info->src_rect.width = state->src_w >> 16; + if (scaling_info->src_rect.width == 0) + return -EINVAL; + + scaling_info->src_rect.height = state->src_h >> 16; + if (scaling_info->src_rect.height == 0) + return -EINVAL; + + scaling_info->dst_rect.x = state->crtc_x; + scaling_info->dst_rect.y = state->crtc_y; + + if (state->crtc_w == 0) + return -EINVAL; + + scaling_info->dst_rect.width = state->crtc_w; + + if (state->crtc_h == 0) + return -EINVAL; + + scaling_info->dst_rect.height = state->crtc_h; + + /* DRM doesn't specify clipping on destination output. 
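 * The clip rectangle below therefore simply mirrors the destination
 * rectangle.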
*/ + scaling_info->clip_rect = scaling_info->dst_rect; + + /* Validate scaling per-format with DC plane caps */ + if (state->plane && state->plane->dev && state->fb) { + get_min_max_dc_plane_scaling(state->plane->dev, state->fb, + &min_downscale, &max_upscale); + } else { + min_downscale = 250; + max_upscale = 16000; + } + + scale_w = scaling_info->dst_rect.width * 1000 / + scaling_info->src_rect.width; + + if (scale_w < min_downscale || scale_w > max_upscale) + return -EINVAL; + + scale_h = scaling_info->dst_rect.height * 1000 / + scaling_info->src_rect.height; + + if (scale_h < min_downscale || scale_h > max_upscale) + return -EINVAL; + + /* + * The "scaling_quality" can be ignored for now, quality = 0 has DC + * assume reasonable defaults based on the format. + */ + + return 0; +} + +static int dm_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, + plane); + struct amdgpu_device *adev = drm_to_adev(plane->dev); + struct dc *dc = adev->dm.dc; + struct dm_plane_state *dm_plane_state; + struct dc_scaling_info scaling_info; + struct drm_crtc_state *new_crtc_state; + int ret; + + trace_amdgpu_dm_plane_atomic_check(new_plane_state); + + dm_plane_state = to_dm_plane_state(new_plane_state); + + if (!dm_plane_state->dc_state) + return 0; + + new_crtc_state = + drm_atomic_get_new_crtc_state(state, + new_plane_state->crtc); + if (!new_crtc_state) + return -EINVAL; + + ret = amdgpu_dm_plane_helper_check_state(new_plane_state, new_crtc_state); + if (ret) + return ret; + + ret = amdgpu_dm_plane_fill_dc_scaling_info(adev, new_plane_state, &scaling_info); + if (ret) + return ret; + + if (dc_validate_plane(dc, dm_plane_state->dc_state) == DC_OK) + return 0; + + return -EINVAL; +} + +static int dm_plane_atomic_async_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + /* Only support async updates on cursor planes. */ + if (plane->type != DRM_PLANE_TYPE_CURSOR) + return -EINVAL; + + return 0; +} + +static int get_cursor_position(struct drm_plane *plane, struct drm_crtc *crtc, + struct dc_cursor_position *position) +{ + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + int x, y; + int xorigin = 0, yorigin = 0; + + if (!crtc || !plane->state->fb) + return 0; + + if ((plane->state->crtc_w > amdgpu_crtc->max_cursor_width) || + (plane->state->crtc_h > amdgpu_crtc->max_cursor_height)) { + DRM_ERROR("%s: bad cursor width or height %d x %d\n", + __func__, + plane->state->crtc_w, + plane->state->crtc_h); + return -EINVAL; + } + + x = plane->state->crtc_x; + y = plane->state->crtc_y; + + if (x <= -amdgpu_crtc->max_cursor_width || + y <= -amdgpu_crtc->max_cursor_height) + return 0; + + if (x < 0) { + xorigin = min(-x, amdgpu_crtc->max_cursor_width - 1); + x = 0; + } + if (y < 0) { + yorigin = min(-y, amdgpu_crtc->max_cursor_height - 1); + y = 0; + } + position->enable = true; + position->translate_by_source = true; + position->x = x; + position->y = y; + position->x_hotspot = xorigin; + position->y_hotspot = yorigin; + + return 0; +} + +void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, + struct drm_plane_state *old_plane_state) +{ + struct amdgpu_device *adev = drm_to_adev(plane->dev); + struct amdgpu_framebuffer *afb = to_amdgpu_framebuffer(plane->state->fb); + struct drm_crtc *crtc = afb ? plane->state->crtc : old_plane_state->crtc; + struct dm_crtc_state *crtc_state = crtc ? 
to_dm_crtc_state(crtc->state) : NULL; + struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); + uint64_t address = afb ? afb->address : 0; + struct dc_cursor_position position = {0}; + struct dc_cursor_attributes attributes; + int ret; + + if (!plane->state->fb && !old_plane_state->fb) + return; + + DC_LOG_CURSOR("%s: crtc_id=%d with size %d to %d\n", + __func__, + amdgpu_crtc->crtc_id, + plane->state->crtc_w, + plane->state->crtc_h); + + ret = get_cursor_position(plane, crtc, &position); + if (ret) + return; + + if (!position.enable) { + /* turn off cursor */ + if (crtc_state && crtc_state->stream) { + mutex_lock(&adev->dm.dc_lock); + dc_stream_set_cursor_position(crtc_state->stream, + &position); + mutex_unlock(&adev->dm.dc_lock); + } + return; + } + + amdgpu_crtc->cursor_width = plane->state->crtc_w; + amdgpu_crtc->cursor_height = plane->state->crtc_h; + + memset(&attributes, 0, sizeof(attributes)); + attributes.address.high_part = upper_32_bits(address); + attributes.address.low_part = lower_32_bits(address); + attributes.width = plane->state->crtc_w; + attributes.height = plane->state->crtc_h; + attributes.color_format = CURSOR_MODE_COLOR_PRE_MULTIPLIED_ALPHA; + attributes.rotation_angle = 0; + attributes.attribute_flags.value = 0; + + /* Enable cursor degamma ROM on DCN3+ for implicit sRGB degamma in DRM + * legacy gamma setup. + */ + if (crtc_state->cm_is_degamma_srgb && + adev->dm.dc->caps.color.dpp.gamma_corr) + attributes.attribute_flags.bits.ENABLE_CURSOR_DEGAMMA = 1; + + attributes.pitch = afb->base.pitches[0] / afb->base.format->cpp[0]; + + if (crtc_state->stream) { + mutex_lock(&adev->dm.dc_lock); + if (!dc_stream_set_cursor_attributes(crtc_state->stream, + &attributes)) + DRM_ERROR("DC failed to set cursor attributes\n"); + + if (!dc_stream_set_cursor_position(crtc_state->stream, + &position)) + DRM_ERROR("DC failed to set cursor position\n"); + mutex_unlock(&adev->dm.dc_lock); + } +} + +static void dm_plane_atomic_async_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(state, + plane); + struct drm_plane_state *old_state = + drm_atomic_get_old_plane_state(state, plane); + + trace_amdgpu_dm_atomic_update_cursor(new_state); + + swap(plane->state->fb, new_state->fb); + + plane->state->src_x = new_state->src_x; + plane->state->src_y = new_state->src_y; + plane->state->src_w = new_state->src_w; + plane->state->src_h = new_state->src_h; + plane->state->crtc_x = new_state->crtc_x; + plane->state->crtc_y = new_state->crtc_y; + plane->state->crtc_w = new_state->crtc_w; + plane->state->crtc_h = new_state->crtc_h; + + amdgpu_dm_plane_handle_cursor_update(plane, old_state); +} + +static const struct drm_plane_helper_funcs dm_plane_helper_funcs = { + .prepare_fb = dm_plane_helper_prepare_fb, + .cleanup_fb = dm_plane_helper_cleanup_fb, + .atomic_check = dm_plane_atomic_check, + .atomic_async_check = dm_plane_atomic_async_check, + .atomic_async_update = dm_plane_atomic_async_update +}; + +static void dm_drm_plane_reset(struct drm_plane *plane) +{ + struct dm_plane_state *amdgpu_state = NULL; + + if (plane->state) + plane->funcs->atomic_destroy_state(plane, plane->state); + + amdgpu_state = kzalloc(sizeof(*amdgpu_state), GFP_KERNEL); + WARN_ON(amdgpu_state == NULL); + + if (amdgpu_state) + __drm_atomic_helper_plane_reset(plane, &amdgpu_state->base); +} + +static struct drm_plane_state * +dm_drm_plane_duplicate_state(struct drm_plane *plane) +{ + struct dm_plane_state *dm_plane_state, 
*old_dm_plane_state; + + old_dm_plane_state = to_dm_plane_state(plane->state); + dm_plane_state = kzalloc(sizeof(*dm_plane_state), GFP_KERNEL); + if (!dm_plane_state) + return NULL; + + __drm_atomic_helper_plane_duplicate_state(plane, &dm_plane_state->base); + + if (old_dm_plane_state->dc_state) { + dm_plane_state->dc_state = old_dm_plane_state->dc_state; + dc_plane_state_retain(dm_plane_state->dc_state); + } + + return &dm_plane_state->base; +} + +static bool dm_plane_format_mod_supported(struct drm_plane *plane, + uint32_t format, + uint64_t modifier) +{ + struct amdgpu_device *adev = drm_to_adev(plane->dev); + const struct drm_format_info *info = drm_format_info(format); + int i; + + enum dm_micro_swizzle microtile = modifier_gfx9_swizzle_mode(modifier) & 3; + + if (!info) + return false; + + /* + * We always have to allow these modifiers: + * 1. Core DRM checks for LINEAR support if userspace does not provide modifiers. + * 2. Not passing any modifiers is the same as explicitly passing INVALID. + */ + if (modifier == DRM_FORMAT_MOD_LINEAR || + modifier == DRM_FORMAT_MOD_INVALID) { + return true; + } + + /* Check that the modifier is on the list of the plane's supported modifiers. */ + for (i = 0; i < plane->modifier_count; i++) { + if (modifier == plane->modifiers[i]) + break; + } + if (i == plane->modifier_count) + return false; + + /* + * For D swizzle the canonical modifier depends on the bpp, so check + * it here. + */ + if (AMD_FMT_MOD_GET(TILE_VERSION, modifier) == AMD_FMT_MOD_TILE_VER_GFX9 && + adev->family >= AMDGPU_FAMILY_NV) { + if (microtile == MICRO_SWIZZLE_D && info->cpp[0] == 4) + return false; + } + + if (adev->family >= AMDGPU_FAMILY_RV && microtile == MICRO_SWIZZLE_D && + info->cpp[0] < 8) + return false; + + if (modifier_has_dcc(modifier)) { + /* Per radeonsi comments 16/64 bpp are more complicated. */ + if (info->cpp[0] != 4) + return false; + /* We support multi-planar formats, but not when combined with + * additional DCC metadata planes. 
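 * Together with the cpp check above, this restricts DCC here to
 * single-plane 32bpp formats.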
+ */ + if (info->num_planes > 1) + return false; + } + + return true; +} + +static void dm_drm_plane_destroy_state(struct drm_plane *plane, + struct drm_plane_state *state) +{ + struct dm_plane_state *dm_plane_state = to_dm_plane_state(state); + + if (dm_plane_state->dc_state) + dc_plane_state_release(dm_plane_state->dc_state); + + drm_atomic_helper_plane_destroy_state(plane, state); +} + +static const struct drm_plane_funcs dm_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_helper_destroy, + .reset = dm_drm_plane_reset, + .atomic_duplicate_state = dm_drm_plane_duplicate_state, + .atomic_destroy_state = dm_drm_plane_destroy_state, + .format_mod_supported = dm_plane_format_mod_supported, +}; + +int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, + struct drm_plane *plane, + unsigned long possible_crtcs, + const struct dc_plane_cap *plane_cap) +{ + uint32_t formats[32]; + int num_formats; + int res = -EPERM; + unsigned int supported_rotations; + uint64_t *modifiers = NULL; + + num_formats = get_plane_formats(plane, plane_cap, formats, + ARRAY_SIZE(formats)); + + res = get_plane_modifiers(dm->adev, plane->type, &modifiers); + if (res) + return res; + + if (modifiers == NULL) + adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true; + + res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs, + &dm_plane_funcs, formats, num_formats, + modifiers, plane->type, NULL); + kfree(modifiers); + if (res) + return res; + + if (plane->type == DRM_PLANE_TYPE_OVERLAY && + plane_cap && plane_cap->per_pixel_alpha) { + unsigned int blend_caps = BIT(DRM_MODE_BLEND_PIXEL_NONE) | + BIT(DRM_MODE_BLEND_PREMULTI) | + BIT(DRM_MODE_BLEND_COVERAGE); + + drm_plane_create_alpha_property(plane); + drm_plane_create_blend_mode_property(plane, blend_caps); + } + + if (plane->type == DRM_PLANE_TYPE_PRIMARY) { + drm_plane_create_zpos_immutable_property(plane, 0); + } else if (plane->type == DRM_PLANE_TYPE_OVERLAY) { + unsigned int zpos = 1 + drm_plane_index(plane); + drm_plane_create_zpos_property(plane, zpos, 1, 254); + } else if (plane->type == DRM_PLANE_TYPE_CURSOR) { + drm_plane_create_zpos_immutable_property(plane, 255); + } + + if (plane->type == DRM_PLANE_TYPE_PRIMARY && + plane_cap && + (plane_cap->pixel_format_support.nv12 || + plane_cap->pixel_format_support.p010)) { + /* This only affects YUV formats. 
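 * RGB framebuffers sample identically regardless of the color encoding
 * and color range properties created below.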
*/ + drm_plane_create_color_properties( + plane, + BIT(DRM_COLOR_YCBCR_BT601) | + BIT(DRM_COLOR_YCBCR_BT709) | + BIT(DRM_COLOR_YCBCR_BT2020), + BIT(DRM_COLOR_YCBCR_LIMITED_RANGE) | + BIT(DRM_COLOR_YCBCR_FULL_RANGE), + DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE); + } + + supported_rotations = + DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_90 | + DRM_MODE_ROTATE_180 | DRM_MODE_ROTATE_270; + + if (dm->adev->asic_type >= CHIP_BONAIRE && + plane->type != DRM_PLANE_TYPE_CURSOR) + drm_plane_create_rotation_property(plane, DRM_MODE_ROTATE_0, + supported_rotations); + + if (dm->adev->ip_versions[DCE_HWIP][0] > IP_VERSION(3, 0, 1) && + plane->type != DRM_PLANE_TYPE_CURSOR) + drm_plane_enable_fb_damage_clips(plane); + + drm_plane_helper_add(plane, &dm_plane_helper_funcs); + + /* Create (reset) the plane state */ + if (plane->funcs->reset) + plane->funcs->reset(plane); + + return 0; +} + +bool is_video_format(uint32_t format) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(video_formats); i++) + if (format == video_formats[i]) + return true; + + return false; +} + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h new file mode 100644 index 0000000000..930f1572f8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.h @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright 2022 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __AMDGPU_DM_PLANE_H__ +#define __AMDGPU_DM_PLANE_H__ + +#include "dc.h" + +void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane, + struct drm_plane_state *old_plane_state); + +int amdgpu_dm_plane_fill_dc_scaling_info(struct amdgpu_device *adev, + const struct drm_plane_state *state, + struct dc_scaling_info *scaling_info); + +int amdgpu_dm_plane_helper_check_state(struct drm_plane_state *state, + struct drm_crtc_state *new_crtc_state); + +int amdgpu_dm_plane_fill_plane_buffer_attributes(struct amdgpu_device *adev, + const struct amdgpu_framebuffer *afb, + const enum surface_pixel_format format, + const enum dc_rotation_angle rotation, + const uint64_t tiling_flags, + union dc_tiling_info *tiling_info, + struct plane_size *plane_size, + struct dc_plane_dcc_param *dcc, + struct dc_plane_address *address, + bool tmz_surface, + bool force_disable_dcc); + +int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, + struct drm_plane *plane, + unsigned long possible_crtcs, + const struct dc_plane_cap *plane_cap); + +const struct drm_format_info *amdgpu_dm_plane_get_format_info(const struct drm_mode_fb_cmd2 *cmd); + +void amdgpu_dm_plane_fill_blending_from_plane_state(const struct drm_plane_state *plane_state, + bool *per_pixel_alpha, bool *pre_multiplied_alpha, + bool *global_alpha, int *global_alpha_value); + +bool is_video_format(uint32_t format); +#endif diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c new file mode 100644 index 0000000000..848c5b4bb3 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -0,0 +1,806 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + */ +#include <linux/string.h> +#include <linux/acpi.h> + +#include <drm/drm_probe_helper.h> +#include <drm/amdgpu_drm.h> +#include "dm_services.h" +#include "amdgpu.h" +#include "amdgpu_dm.h" +#include "amdgpu_dm_irq.h" +#include "amdgpu_pm.h" +#include "dm_pp_smu.h" + +bool dm_pp_apply_display_requirements( + const struct dc_context *ctx, + const struct dm_pp_display_configuration *pp_display_cfg) +{ + struct amdgpu_device *adev = ctx->driver_context; + int i; + + if (adev->pm.dpm_enabled) { + + memset(&adev->pm.pm_display_cfg, 0, + sizeof(adev->pm.pm_display_cfg)); + + adev->pm.pm_display_cfg.cpu_cc6_disable = + pp_display_cfg->cpu_cc6_disable; + + adev->pm.pm_display_cfg.cpu_pstate_disable = + pp_display_cfg->cpu_pstate_disable; + + adev->pm.pm_display_cfg.cpu_pstate_separation_time = + pp_display_cfg->cpu_pstate_separation_time; + + adev->pm.pm_display_cfg.nb_pstate_switch_disable = + pp_display_cfg->nb_pstate_switch_disable; + + adev->pm.pm_display_cfg.num_display = + pp_display_cfg->display_count; + adev->pm.pm_display_cfg.num_path_including_non_display = + pp_display_cfg->display_count; + + adev->pm.pm_display_cfg.min_core_set_clock = + pp_display_cfg->min_engine_clock_khz/10; + adev->pm.pm_display_cfg.min_core_set_clock_in_sr = + pp_display_cfg->min_engine_clock_deep_sleep_khz/10; + adev->pm.pm_display_cfg.min_mem_set_clock = + pp_display_cfg->min_memory_clock_khz/10; + + adev->pm.pm_display_cfg.min_dcef_deep_sleep_set_clk = + pp_display_cfg->min_engine_clock_deep_sleep_khz/10; + adev->pm.pm_display_cfg.min_dcef_set_clk = + pp_display_cfg->min_dcfclock_khz/10; + + adev->pm.pm_display_cfg.multi_monitor_in_sync = + pp_display_cfg->all_displays_in_sync; + adev->pm.pm_display_cfg.min_vblank_time = + pp_display_cfg->avail_mclk_switch_time_us; + + adev->pm.pm_display_cfg.display_clk = + pp_display_cfg->disp_clk_khz/10; + + adev->pm.pm_display_cfg.dce_tolerable_mclk_in_active_latency = + pp_display_cfg->avail_mclk_switch_time_in_disp_active_us; + + adev->pm.pm_display_cfg.crtc_index = pp_display_cfg->crtc_index; + adev->pm.pm_display_cfg.line_time_in_us = + pp_display_cfg->line_time_in_us; + + adev->pm.pm_display_cfg.vrefresh = pp_display_cfg->disp_configs[0].v_refresh; + adev->pm.pm_display_cfg.crossfire_display_index = -1; + adev->pm.pm_display_cfg.min_bus_bandwidth = 0; + + for (i = 0; i < pp_display_cfg->display_count; i++) { + const struct dm_pp_single_disp_config *dc_cfg = + &pp_display_cfg->disp_configs[i]; + adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1; + } + + amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg); + + amdgpu_dpm_compute_clocks(adev); + } + + return true; +} + +static void get_default_clock_levels( + enum dm_pp_clock_type clk_type, + struct dm_pp_clock_levels *clks) +{ + uint32_t disp_clks_in_khz[6] = { + 300000, 400000, 496560, 626090, 685720, 757900 }; + uint32_t sclks_in_khz[6] = { + 300000, 360000, 423530, 514290, 626090, 720000 }; + uint32_t mclks_in_khz[2] = { 333000, 800000 }; + + switch (clk_type) { + case DM_PP_CLOCK_TYPE_DISPLAY_CLK: + clks->num_levels = 6; + memmove(clks->clocks_in_khz, disp_clks_in_khz, + sizeof(disp_clks_in_khz)); + break; + case DM_PP_CLOCK_TYPE_ENGINE_CLK: + clks->num_levels = 6; + memmove(clks->clocks_in_khz, sclks_in_khz, + sizeof(sclks_in_khz)); + break; + case DM_PP_CLOCK_TYPE_MEMORY_CLK: + clks->num_levels = 2; + memmove(clks->clocks_in_khz, mclks_in_khz, + sizeof(mclks_in_khz)); + break; + default: + clks->num_levels = 0; + break; + } +} + +static enum 
amd_pp_clock_type dc_to_pp_clock_type( + enum dm_pp_clock_type dm_pp_clk_type) +{ + enum amd_pp_clock_type amd_pp_clk_type = 0; + + switch (dm_pp_clk_type) { + case DM_PP_CLOCK_TYPE_DISPLAY_CLK: + amd_pp_clk_type = amd_pp_disp_clock; + break; + case DM_PP_CLOCK_TYPE_ENGINE_CLK: + amd_pp_clk_type = amd_pp_sys_clock; + break; + case DM_PP_CLOCK_TYPE_MEMORY_CLK: + amd_pp_clk_type = amd_pp_mem_clock; + break; + case DM_PP_CLOCK_TYPE_DCEFCLK: + amd_pp_clk_type = amd_pp_dcef_clock; + break; + case DM_PP_CLOCK_TYPE_DCFCLK: + amd_pp_clk_type = amd_pp_dcf_clock; + break; + case DM_PP_CLOCK_TYPE_PIXELCLK: + amd_pp_clk_type = amd_pp_pixel_clock; + break; + case DM_PP_CLOCK_TYPE_FCLK: + amd_pp_clk_type = amd_pp_f_clock; + break; + case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK: + amd_pp_clk_type = amd_pp_phy_clock; + break; + case DM_PP_CLOCK_TYPE_DPPCLK: + amd_pp_clk_type = amd_pp_dpp_clock; + break; + default: + DRM_ERROR("DM_PPLIB: invalid clock type: %d!\n", + dm_pp_clk_type); + break; + } + + return amd_pp_clk_type; +} + +static enum dm_pp_clocks_state pp_to_dc_powerlevel_state( + enum PP_DAL_POWERLEVEL max_clocks_state) +{ + switch (max_clocks_state) { + case PP_DAL_POWERLEVEL_0: + return DM_PP_CLOCKS_DPM_STATE_LEVEL_0; + case PP_DAL_POWERLEVEL_1: + return DM_PP_CLOCKS_DPM_STATE_LEVEL_1; + case PP_DAL_POWERLEVEL_2: + return DM_PP_CLOCKS_DPM_STATE_LEVEL_2; + case PP_DAL_POWERLEVEL_3: + return DM_PP_CLOCKS_DPM_STATE_LEVEL_3; + case PP_DAL_POWERLEVEL_4: + return DM_PP_CLOCKS_DPM_STATE_LEVEL_4; + case PP_DAL_POWERLEVEL_5: + return DM_PP_CLOCKS_DPM_STATE_LEVEL_5; + case PP_DAL_POWERLEVEL_6: + return DM_PP_CLOCKS_DPM_STATE_LEVEL_6; + case PP_DAL_POWERLEVEL_7: + return DM_PP_CLOCKS_DPM_STATE_LEVEL_7; + default: + DRM_ERROR("DM_PPLIB: invalid powerlevel state: %d!\n", + max_clocks_state); + return DM_PP_CLOCKS_STATE_INVALID; + } +} + +static void pp_to_dc_clock_levels( + const struct amd_pp_clocks *pp_clks, + struct dm_pp_clock_levels *dc_clks, + enum dm_pp_clock_type dc_clk_type) +{ + uint32_t i; + + if (pp_clks->count > DM_PP_MAX_CLOCK_LEVELS) { + DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n", + DC_DECODE_PP_CLOCK_TYPE(dc_clk_type), + pp_clks->count, + DM_PP_MAX_CLOCK_LEVELS); + + dc_clks->num_levels = DM_PP_MAX_CLOCK_LEVELS; + } else + dc_clks->num_levels = pp_clks->count; + + DRM_INFO("DM_PPLIB: values for %s clock\n", + DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); + + for (i = 0; i < dc_clks->num_levels; i++) { + DRM_INFO("DM_PPLIB:\t %d\n", pp_clks->clock[i]); + dc_clks->clocks_in_khz[i] = pp_clks->clock[i]; + } +} + +static void pp_to_dc_clock_levels_with_latency( + const struct pp_clock_levels_with_latency *pp_clks, + struct dm_pp_clock_levels_with_latency *clk_level_info, + enum dm_pp_clock_type dc_clk_type) +{ + uint32_t i; + + if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) { + DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n", + DC_DECODE_PP_CLOCK_TYPE(dc_clk_type), + pp_clks->num_levels, + DM_PP_MAX_CLOCK_LEVELS); + + clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS; + } else + clk_level_info->num_levels = pp_clks->num_levels; + + DRM_DEBUG("DM_PPLIB: values for %s clock\n", + DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); + + for (i = 0; i < clk_level_info->num_levels; i++) { + DRM_DEBUG("DM_PPLIB:\t %d in kHz\n", pp_clks->data[i].clocks_in_khz); + clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz; + clk_level_info->data[i].latency_in_us = pp_clks->data[i].latency_in_us; + } +} + +static void 
pp_to_dc_clock_levels_with_voltage( + const struct pp_clock_levels_with_voltage *pp_clks, + struct dm_pp_clock_levels_with_voltage *clk_level_info, + enum dm_pp_clock_type dc_clk_type) +{ + uint32_t i; + + if (pp_clks->num_levels > DM_PP_MAX_CLOCK_LEVELS) { + DRM_INFO("DM_PPLIB: Warning: %s clock: number of levels %d exceeds maximum of %d!\n", + DC_DECODE_PP_CLOCK_TYPE(dc_clk_type), + pp_clks->num_levels, + DM_PP_MAX_CLOCK_LEVELS); + + clk_level_info->num_levels = DM_PP_MAX_CLOCK_LEVELS; + } else + clk_level_info->num_levels = pp_clks->num_levels; + + DRM_INFO("DM_PPLIB: values for %s clock\n", + DC_DECODE_PP_CLOCK_TYPE(dc_clk_type)); + + for (i = 0; i < clk_level_info->num_levels; i++) { + DRM_INFO("DM_PPLIB:\t %d in kHz, %d in mV\n", pp_clks->data[i].clocks_in_khz, + pp_clks->data[i].voltage_in_mv); + clk_level_info->data[i].clocks_in_khz = pp_clks->data[i].clocks_in_khz; + clk_level_info->data[i].voltage_in_mv = pp_clks->data[i].voltage_in_mv; + } +} + +bool dm_pp_get_clock_levels_by_type( + const struct dc_context *ctx, + enum dm_pp_clock_type clk_type, + struct dm_pp_clock_levels *dc_clks) +{ + struct amdgpu_device *adev = ctx->driver_context; + struct amd_pp_clocks pp_clks = { 0 }; + struct amd_pp_simple_clock_info validation_clks = { 0 }; + uint32_t i; + + if (amdgpu_dpm_get_clock_by_type(adev, + dc_to_pp_clock_type(clk_type), &pp_clks)) { + /* Error in pplib. Provide default values. */ + get_default_clock_levels(clk_type, dc_clks); + return true; + } + + pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type); + + if (amdgpu_dpm_get_display_mode_validation_clks(adev, &validation_clks)) { + /* Error in pplib. Provide default values. */ + DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n"); + validation_clks.engine_max_clock = 72000; + validation_clks.memory_max_clock = 80000; + validation_clks.level = 0; + } + + DRM_INFO("DM_PPLIB: Validation clocks:\n"); + DRM_INFO("DM_PPLIB: engine_max_clock: %d\n", + validation_clks.engine_max_clock); + DRM_INFO("DM_PPLIB: memory_max_clock: %d\n", + validation_clks.memory_max_clock); + DRM_INFO("DM_PPLIB: level : %d\n", + validation_clks.level); + + /* Translate 10 kHz to kHz. */ + validation_clks.engine_max_clock *= 10; + validation_clks.memory_max_clock *= 10; + + /* Determine the highest non-boosted level from the Validation Clocks */ + if (clk_type == DM_PP_CLOCK_TYPE_ENGINE_CLK) { + for (i = 0; i < dc_clks->num_levels; i++) { + if (dc_clks->clocks_in_khz[i] > validation_clks.engine_max_clock) { + /* This clock is higher than the validation clock. + * That means the previous one is the highest + * non-boosted one. + */ + DRM_INFO("DM_PPLIB: reducing engine clock level from %d to %d\n", + dc_clks->num_levels, i); + dc_clks->num_levels = i > 0 ? i : 1; + break; + } + } + } else if (clk_type == DM_PP_CLOCK_TYPE_MEMORY_CLK) { + for (i = 0; i < dc_clks->num_levels; i++) { + if (dc_clks->clocks_in_khz[i] > validation_clks.memory_max_clock) { + DRM_INFO("DM_PPLIB: reducing memory clock level from %d to %d\n", + dc_clks->num_levels, i); + dc_clks->num_levels = i > 0 ? 
i : 1; + break; + } + } + } + + return true; +} + +bool dm_pp_get_clock_levels_by_type_with_latency( + const struct dc_context *ctx, + enum dm_pp_clock_type clk_type, + struct dm_pp_clock_levels_with_latency *clk_level_info) +{ + struct amdgpu_device *adev = ctx->driver_context; + struct pp_clock_levels_with_latency pp_clks = { 0 }; + int ret; + + ret = amdgpu_dpm_get_clock_by_type_with_latency(adev, + dc_to_pp_clock_type(clk_type), + &pp_clks); + if (ret) + return false; + + pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type); + + return true; +} + +bool dm_pp_get_clock_levels_by_type_with_voltage( + const struct dc_context *ctx, + enum dm_pp_clock_type clk_type, + struct dm_pp_clock_levels_with_voltage *clk_level_info) +{ + struct amdgpu_device *adev = ctx->driver_context; + struct pp_clock_levels_with_voltage pp_clk_info = {0}; + int ret; + + ret = amdgpu_dpm_get_clock_by_type_with_voltage(adev, + dc_to_pp_clock_type(clk_type), + &pp_clk_info); + if (ret) + return false; + + pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type); + + return true; +} + +bool dm_pp_notify_wm_clock_changes( + const struct dc_context *ctx, + struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges) +{ + struct amdgpu_device *adev = ctx->driver_context; + + /* + * Limit this watermark setting for Polaris for now + * TODO: expand this to other ASICs + */ + if ((adev->asic_type >= CHIP_POLARIS10) && + (adev->asic_type <= CHIP_VEGAM) && + !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, + (void *)wm_with_clock_ranges)) + return true; + + return false; +} + +bool dm_pp_apply_power_level_change_request( + const struct dc_context *ctx, + struct dm_pp_power_level_change_request *level_change_req) +{ + /* TODO: to be implemented */ + return false; +} + +bool dm_pp_apply_clock_for_voltage_request( + const struct dc_context *ctx, + struct dm_pp_clock_for_voltage_req *clock_for_voltage_req) +{ + struct amdgpu_device *adev = ctx->driver_context; + struct pp_display_clock_request pp_clock_request = {0}; + int ret = 0; + + pp_clock_request.clock_type = dc_to_pp_clock_type(clock_for_voltage_req->clk_type); + pp_clock_request.clock_freq_in_khz = clock_for_voltage_req->clocks_in_khz; + + if (!pp_clock_request.clock_type) + return false; + + ret = amdgpu_dpm_display_clock_voltage_request(adev, &pp_clock_request); + if (ret && (ret != -EOPNOTSUPP)) + return false; + + return true; +} + +bool dm_pp_get_static_clocks( + const struct dc_context *ctx, + struct dm_pp_static_clock_info *static_clk_info) +{ + struct amdgpu_device *adev = ctx->driver_context; + struct amd_pp_clock_info pp_clk_info = {0}; + + if (amdgpu_dpm_get_current_clocks(adev, &pp_clk_info)) + return false; + + static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state); + static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10; + static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10; + + return true; +} + +static void pp_rv_set_wm_ranges(struct pp_smu *pp, + struct pp_smu_wm_range_sets *ranges) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges; + struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges; + struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges; + int32_t i; + + wm_with_clock_ranges.num_wm_dmif_sets = ranges->num_reader_wm_sets; + 
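/* DC "reader" watermark sets map to DMIF clock ranges and "writer" sets to MCIF ranges; the per-set min/max clocks below are converted from MHz to kHz. */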
wm_with_clock_ranges.num_wm_mcif_sets = ranges->num_writer_wm_sets; + + for (i = 0; i < wm_with_clock_ranges.num_wm_dmif_sets; i++) { + if (ranges->reader_wm_sets[i].wm_inst > 3) + wm_dce_clocks[i].wm_set_id = WM_SET_A; + else + wm_dce_clocks[i].wm_set_id = + ranges->reader_wm_sets[i].wm_inst; + wm_dce_clocks[i].wm_max_dcfclk_clk_in_khz = + ranges->reader_wm_sets[i].max_drain_clk_mhz * 1000; + wm_dce_clocks[i].wm_min_dcfclk_clk_in_khz = + ranges->reader_wm_sets[i].min_drain_clk_mhz * 1000; + wm_dce_clocks[i].wm_max_mem_clk_in_khz = + ranges->reader_wm_sets[i].max_fill_clk_mhz * 1000; + wm_dce_clocks[i].wm_min_mem_clk_in_khz = + ranges->reader_wm_sets[i].min_fill_clk_mhz * 1000; + } + + for (i = 0; i < wm_with_clock_ranges.num_wm_mcif_sets; i++) { + if (ranges->writer_wm_sets[i].wm_inst > 3) + wm_soc_clocks[i].wm_set_id = WM_SET_A; + else + wm_soc_clocks[i].wm_set_id = + ranges->writer_wm_sets[i].wm_inst; + wm_soc_clocks[i].wm_max_socclk_clk_in_khz = + ranges->writer_wm_sets[i].max_fill_clk_mhz * 1000; + wm_soc_clocks[i].wm_min_socclk_clk_in_khz = + ranges->writer_wm_sets[i].min_fill_clk_mhz * 1000; + wm_soc_clocks[i].wm_max_mem_clk_in_khz = + ranges->writer_wm_sets[i].max_drain_clk_mhz * 1000; + wm_soc_clocks[i].wm_min_mem_clk_in_khz = + ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000; + } + + amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, + &wm_with_clock_ranges); +} + +static void pp_rv_set_pme_wa_enable(struct pp_smu *pp) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + + amdgpu_dpm_notify_smu_enable_pwe(adev); +} + +static void pp_rv_set_active_display_count(struct pp_smu *pp, int count) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + + amdgpu_dpm_set_active_display_count(adev, count); +} + +static void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + + amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, clock); +} + +static void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + + amdgpu_dpm_set_hard_min_dcefclk_by_freq(adev, clock); +} + +static void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + + amdgpu_dpm_set_hard_min_fclk_by_freq(adev, mhz); +} + +static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, + struct pp_smu_wm_range_sets *ranges) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + + amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges); + + return PP_SMU_RESULT_OK; +} + +static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + int ret = 0; + + ret = amdgpu_dpm_set_active_display_count(adev, count); + if (ret == -EOPNOTSUPP) + return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) + /* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */ + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + +static enum pp_smu_status +pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + int ret = 0; + + /* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = 
NULL;1: fail */ + ret = amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, mhz); + if (ret == -EOPNOTSUPP) + return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + +static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq( + struct pp_smu *pp, int mhz) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct pp_display_clock_request clock_req; + int ret = 0; + + clock_req.clock_type = amd_pp_dcef_clock; + clock_req.clock_freq_in_khz = mhz * 1000; + + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL + * 1: fail + */ + ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req); + if (ret == -EOPNOTSUPP) + return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + +static enum pp_smu_status +pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct pp_display_clock_request clock_req; + int ret = 0; + + clock_req.clock_type = amd_pp_mem_clock; + clock_req.clock_freq_in_khz = mhz * 1000; + + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL + * 1: fail + */ + ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req); + if (ret == -EOPNOTSUPP) + return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + +static enum pp_smu_status pp_nv_set_pstate_handshake_support( + struct pp_smu *pp, bool pstate_handshake_supported) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + + if (amdgpu_dpm_display_disable_memory_clock_switch(adev, + !pstate_handshake_supported)) + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + +static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, + enum pp_smu_nv_clock_id clock_id, int mhz) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + struct pp_display_clock_request clock_req; + int ret = 0; + + switch (clock_id) { + case PP_SMU_NV_DISPCLK: + clock_req.clock_type = amd_pp_disp_clock; + break; + case PP_SMU_NV_PHYCLK: + clock_req.clock_type = amd_pp_phy_clock; + break; + case PP_SMU_NV_PIXELCLK: + clock_req.clock_type = amd_pp_pixel_clock; + break; + default: + break; + } + clock_req.clock_freq_in_khz = mhz * 1000; + + /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL + * 1: fail + */ + ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req); + if (ret == -EOPNOTSUPP) + return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + +static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks( + struct pp_smu *pp, struct pp_smu_nv_clock_table *max_clocks) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + int ret = 0; + + ret = amdgpu_dpm_get_max_sustainable_clocks_by_dc(adev, + max_clocks); + if (ret == -EOPNOTSUPP) + return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + +static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp, + unsigned int *clock_values_in_khz, unsigned int *num_states) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + int ret = 0; + + ret = amdgpu_dpm_get_uclk_dpm_states(adev, + clock_values_in_khz, + num_states); + if 
(ret == -EOPNOTSUPP) + return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + +static enum pp_smu_status pp_rn_get_dpm_clock_table( + struct pp_smu *pp, struct dpm_clocks *clock_table) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + int ret = 0; + + ret = amdgpu_dpm_get_dpm_clock_table(adev, clock_table); + if (ret == -EOPNOTSUPP) + return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) + return PP_SMU_RESULT_FAIL; + + return PP_SMU_RESULT_OK; +} + +static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp, + struct pp_smu_wm_range_sets *ranges) +{ + const struct dc_context *ctx = pp->dm; + struct amdgpu_device *adev = ctx->driver_context; + + amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges); + + return PP_SMU_RESULT_OK; +} + +void dm_pp_get_funcs( + struct dc_context *ctx, + struct pp_smu_funcs *funcs) +{ + switch (ctx->dce_version) { + case DCN_VERSION_1_0: + case DCN_VERSION_1_01: + funcs->ctx.ver = PP_SMU_VER_RV; + funcs->rv_funcs.pp_smu.dm = ctx; + funcs->rv_funcs.set_wm_ranges = pp_rv_set_wm_ranges; + funcs->rv_funcs.set_pme_wa_enable = pp_rv_set_pme_wa_enable; + funcs->rv_funcs.set_display_count = + pp_rv_set_active_display_count; + funcs->rv_funcs.set_min_deep_sleep_dcfclk = + pp_rv_set_min_deep_sleep_dcfclk; + funcs->rv_funcs.set_hard_min_dcfclk_by_freq = + pp_rv_set_hard_min_dcefclk_by_freq; + funcs->rv_funcs.set_hard_min_fclk_by_freq = + pp_rv_set_hard_min_fclk_by_freq; + break; + case DCN_VERSION_2_0: + funcs->ctx.ver = PP_SMU_VER_NV; + funcs->nv_funcs.pp_smu.dm = ctx; + funcs->nv_funcs.set_display_count = pp_nv_set_display_count; + funcs->nv_funcs.set_hard_min_dcfclk_by_freq = + pp_nv_set_hard_min_dcefclk_by_freq; + funcs->nv_funcs.set_min_deep_sleep_dcfclk = + pp_nv_set_min_deep_sleep_dcfclk; + funcs->nv_funcs.set_voltage_by_freq = + pp_nv_set_voltage_by_freq; + funcs->nv_funcs.set_wm_ranges = pp_nv_set_wm_ranges; + + /* TODO: set_pme_wa_enable causes a 4k@60Hz display to not light up */ + funcs->nv_funcs.set_pme_wa_enable = NULL; + /* TODO: debug warning message */ + funcs->nv_funcs.set_hard_min_uclk_by_freq = pp_nv_set_hard_min_uclk_by_freq; + /* TODO: compare data with the Windows driver */ + funcs->nv_funcs.get_maximum_sustainable_clocks = pp_nv_get_maximum_sustainable_clocks; + /* TODO: compare data with the Windows driver */ + funcs->nv_funcs.get_uclk_dpm_states = pp_nv_get_uclk_dpm_states; + funcs->nv_funcs.set_pstate_handshake_support = pp_nv_set_pstate_handshake_support; + break; + + case DCN_VERSION_2_1: + funcs->ctx.ver = PP_SMU_VER_RN; + funcs->rn_funcs.pp_smu.dm = ctx; + funcs->rn_funcs.set_wm_ranges = pp_rn_set_wm_ranges; + funcs->rn_funcs.get_dpm_clock_table = pp_rn_get_dpm_clock_table; + break; + default: + DRM_ERROR("smu version is not supported!\n"); + break; + } +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c new file mode 100644 index 0000000000..08ce3bb8f6 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -0,0 +1,220 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "amdgpu_dm_psr.h" +#include "dc_dmub_srv.h" +#include "dc.h" +#include "dm_helpers.h" +#include "amdgpu_dm.h" +#include "modules/power/power_helpers.h" + +static bool link_supports_psrsu(struct dc_link *link) +{ + struct dc *dc = link->ctx->dc; + + if (!dc->caps.dmcub_support) + return false; + + if (dc->ctx->dce_version < DCN_VERSION_3_1) + return false; + + if (!is_psr_su_specific_panel(link)) + return false; + + if (!link->dpcd_caps.alpm_caps.bits.AUX_WAKE_ALPM_CAP || + !link->dpcd_caps.psr_info.psr_dpcd_caps.bits.Y_COORDINATE_REQUIRED) + return false; + + if (link->dpcd_caps.psr_info.psr_dpcd_caps.bits.SU_GRANULARITY_REQUIRED && + !link->dpcd_caps.psr_info.psr2_su_y_granularity_cap) + return false; + + return dc_dmub_check_min_version(dc->ctx->dmub_srv->dmub); +} + +/* + * amdgpu_dm_set_psr_caps() - set link psr capabilities + * @link: link + * + */ +void amdgpu_dm_set_psr_caps(struct dc_link *link) +{ + if (!(link->connector_signal & SIGNAL_TYPE_EDP)) { + link->psr_settings.psr_feature_enabled = false; + return; + } + + if (link->type == dc_connection_none) { + link->psr_settings.psr_feature_enabled = false; + return; + } + + if (link->dpcd_caps.psr_info.psr_version == 0) { + link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED; + link->psr_settings.psr_feature_enabled = false; + + } else { + if (link_supports_psrsu(link)) + link->psr_settings.psr_version = DC_PSR_VERSION_SU_1; + else + link->psr_settings.psr_version = DC_PSR_VERSION_1; + + link->psr_settings.psr_feature_enabled = true; + } + + DRM_INFO("PSR support %d, DC PSR ver %d, sink PSR ver %d DPCD caps 0x%x su_y_granularity %d\n", + link->psr_settings.psr_feature_enabled, + link->psr_settings.psr_version, + link->dpcd_caps.psr_info.psr_version, + link->dpcd_caps.psr_info.psr_dpcd_caps.raw, + link->dpcd_caps.psr_info.psr2_su_y_granularity_cap); + +} + +/* + * amdgpu_dm_link_setup_psr() - configure psr link + * @stream: stream state + * + * Return: true if success + */ +bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) +{ + struct dc_link *link = NULL; + struct psr_config psr_config = {0}; + struct psr_context psr_context = {0}; + struct dc *dc = NULL; + bool ret = false; + + if (stream == NULL) + return false; + + link = stream->link; + dc = link->ctx->dc; + + if (link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) { + mod_power_calc_psr_configs(&psr_config, link, stream); + + /* 
Linux DM specific updates to the psr config fields */ + psr_config.allow_smu_optimizations = + (amdgpu_dc_feature_mask & DC_PSR_ALLOW_SMU_OPT) && + mod_power_only_edp(dc->current_state, stream); + psr_config.allow_multi_disp_optimizations = + (amdgpu_dc_feature_mask & DC_PSR_ALLOW_MULTI_DISP_OPT); + + if (!psr_su_set_dsc_slice_height(dc, link, stream, &psr_config)) + return false; + + ret = dc_link_setup_psr(link, stream, &psr_config, &psr_context); + + } + DRM_DEBUG_DRIVER("PSR link: %d\n", link->psr_settings.psr_feature_enabled); + + return ret; +} + +/* + * amdgpu_dm_psr_enable() - enable psr f/w + * @stream: stream state + * + * Return: true if success + */ +bool amdgpu_dm_psr_enable(struct dc_stream_state *stream) +{ + struct dc_link *link = stream->link; + unsigned int vsync_rate_hz = 0; + struct dc_static_screen_params params = {0}; + /* Calculate number of static frames before generating interrupt to + * enter PSR. + */ + // Initialize with a fail-safe of 2 static frames + unsigned int num_frames_static = 2; + unsigned int power_opt = 0; + bool psr_enable = true; + + DRM_DEBUG_DRIVER("Enabling psr...\n"); + + vsync_rate_hz = div64_u64(div64_u64(( + stream->timing.pix_clk_100hz * 100), + stream->timing.v_total), + stream->timing.h_total); + + /* Round up: calculate the number of frames such that at least 30 ms of + * time has passed, e.g. a 120 Hz panel has an 8333 us frame time, so + * num_frames_static = 30000 / 8333 + 1 = 4. + */ + if (vsync_rate_hz != 0) { + unsigned int frame_time_microsec = 1000000 / vsync_rate_hz; + + num_frames_static = (30000 / frame_time_microsec) + 1; + } + + params.triggers.cursor_update = true; + params.triggers.overlay_update = true; + params.triggers.surface_update = true; + params.num_frames = num_frames_static; + + dc_stream_set_static_screen_params(link->ctx->dc, + &stream, 1, + &params); + + /* + * Only enable static-screen optimizations for PSR1. For PSR SU, this + * causes issues with the vstartup interrupt, which amdgpu_dm uses to + * send vblank events. + */ + if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1) + power_opt |= psr_power_opt_z10_static_screen; + + return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt); +} + +/* + * amdgpu_dm_psr_disable() - disable psr f/w + * @stream: stream state + * + * Return: true if success + */ +bool amdgpu_dm_psr_disable(struct dc_stream_state *stream) +{ + unsigned int power_opt = 0; + bool psr_enable = false; + + DRM_DEBUG_DRIVER("Disabling psr...\n"); + + return dc_link_set_psr_allow_active(stream->link, &psr_enable, true, false, &power_opt); +} + +/* + * amdgpu_dm_psr_disable_all() - disable psr f/w on all streams + * if psr is enabled on any stream + * @dm: display manager + * + * Return: true if success + */ +bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm) +{ + DRM_DEBUG_DRIVER("Disabling psr if psr is enabled on any stream\n"); + return dc_set_psr_allow_active(dm->dc, false); +} + diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h new file mode 100644 index 0000000000..6806b3c9c8 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h @@ -0,0 +1,40 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#ifndef AMDGPU_DM_AMDGPU_DM_PSR_H_ +#define AMDGPU_DM_AMDGPU_DM_PSR_H_ + +#include "amdgpu.h" + +/* the number of pageflips before enabling psr */ +#define AMDGPU_DM_PSR_ENTRY_DELAY 5 + +void amdgpu_dm_set_psr_caps(struct dc_link *link); +bool amdgpu_dm_psr_enable(struct dc_stream_state *stream); +bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream); +bool amdgpu_dm_psr_disable(struct dc_stream_state *stream); +bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm); + +#endif /* AMDGPU_DM_AMDGPU_DM_PSR_H_ */ diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c new file mode 100644 index 0000000000..32d3086c4c --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.c @@ -0,0 +1,183 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "amdgpu_dm_replay.h" +#include "dc.h" +#include "dm_helpers.h" +#include "amdgpu_dm.h" +#include "modules/power/power_helpers.h" +#include "dmub/inc/dmub_cmd.h" +#include "dc/inc/link.h" + +/* + * link_supports_replay() - check if the link supports replay + * @link: link + * @aconnector: aconnector + * + */ +static bool link_supports_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector) +{ + struct dm_connector_state *state = to_dm_connector_state(aconnector->base.state); + struct dpcd_caps *dpcd_caps = &link->dpcd_caps; + struct adaptive_sync_caps *as_caps = &link->dpcd_caps.adaptive_sync_caps; + + if (!state->freesync_capable) + return false; + + if (!aconnector->vsdb_info.replay_mode) + return false; + + // Check the eDP version + if (dpcd_caps->edp_rev < EDP_REVISION_13) + return false; + + if (!dpcd_caps->alpm_caps.bits.AUX_WAKE_ALPM_CAP) + return false; + + // Check adaptive sync support cap + if (!as_caps->dp_adap_sync_caps.bits.ADAPTIVE_SYNC_SDP_SUPPORT) + return false; + + return true; +} + +/* + * amdgpu_dm_setup_replay() - setup replay configuration + * @link: link + * @aconnector: aconnector + * + */ +bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector) +{ + struct replay_config pr_config = {0}; + union replay_debug_flags *debug_flags = NULL; + + // For eDP, if Replay is supported, return true to skip checks + if (link->replay_settings.config.replay_supported) + return true; + + if (!dc_is_embedded_signal(link->connector_signal)) + return false; + + if (link->panel_config.psr.disallow_replay) + return false; + + if (!link_supports_replay(link, aconnector)) + return false; + + // Mark Replay as supported in the link and update related attributes + pr_config.replay_supported = true; + pr_config.replay_power_opt_supported = 0; + pr_config.replay_enable_option |= pr_enable_option_static_screen; + pr_config.replay_timing_sync_supported = aconnector->max_vfreq >= 2 * aconnector->min_vfreq ? true : false; + + if (!pr_config.replay_timing_sync_supported) + pr_config.replay_enable_option &= ~pr_enable_option_general_ui; + + debug_flags = (union replay_debug_flags *)&pr_config.debug_flags; + debug_flags->u32All = 0; + debug_flags->bitfields.visual_confirm = + link->ctx->dc->debug.visual_confirm == VISUAL_CONFIRM_REPLAY ? 
true : false; + + link->replay_settings.replay_feature_enabled = true; + + init_replay_config(link, &pr_config); + + return true; +} + + +/* + * amdgpu_dm_replay_enable() - enable replay f/w + * @stream: stream state + * + * Return: true if success + */ +bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait) +{ + uint64_t state; + unsigned int retry_count; + bool replay_active = true; + const unsigned int max_retry = 1000; + bool force_static = true; + struct dc_link *link = NULL; + + + if (stream == NULL) + return false; + + link = stream->link; + + if (link == NULL) + return false; + + link->dc->link_srv->edp_setup_replay(link, stream); + + link->dc->link_srv->edp_set_replay_allow_active(link, NULL, false, false, NULL); + + link->dc->link_srv->edp_set_replay_allow_active(link, &replay_active, false, true, NULL); + + if (wait == true) { + + for (retry_count = 0; retry_count <= max_retry; retry_count++) { + dc_link_get_replay_state(link, &state); + if (replay_active) { + if (state != REPLAY_STATE_0 && + (!force_static || state == REPLAY_STATE_3)) + break; + } else { + if (state == REPLAY_STATE_0) + break; + } + udelay(500); + } + + /* assert if max retry hit */ + if (retry_count >= max_retry) + ASSERT(0); + } else { + /* To-do: Add trace log */ + } + + return true; +} + +/* + * amdgpu_dm_replay_disable() - disable replay f/w + * @stream: stream state + * + * Return: true if success + */ +bool amdgpu_dm_replay_disable(struct dc_stream_state *stream) +{ + + if (stream->link) { + DRM_DEBUG_DRIVER("Disabling replay...\n"); + stream->link->dc->link_srv->edp_set_replay_allow_active(stream->link, NULL, false, false, NULL); + return true; + } + + return false; +} diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h new file mode 100644 index 0000000000..01cba3cd62 --- /dev/null +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_replay.h @@ -0,0 +1,46 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef AMDGPU_DM_AMDGPU_DM_REPLAY_H_
+#define AMDGPU_DM_AMDGPU_DM_REPLAY_H_
+
+#include "amdgpu.h"
+
+enum replay_enable_option {
+ pr_enable_option_static_screen = 0x1,
+ pr_enable_option_mpo_video = 0x2,
+ pr_enable_option_full_screen_video = 0x4,
+ pr_enable_option_general_ui = 0x8,
+ pr_enable_option_static_screen_coasting = 0x10000,
+ pr_enable_option_mpo_video_coasting = 0x20000,
+ pr_enable_option_full_screen_video_coasting = 0x40000,
+};
+
+bool amdgpu_dm_replay_enable(struct dc_stream_state *stream, bool wait);
+bool amdgpu_dm_setup_replay(struct dc_link *link, struct amdgpu_dm_connector *aconnector);
+bool amdgpu_dm_replay_disable(struct dc_stream_state *stream);
+
+#endif /* AMDGPU_DM_AMDGPU_DM_REPLAY_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
new file mode 100644
index 0000000000..d9e33c6bcc
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright 2015 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/string.h>
+#include <linux/acpi.h>
+
+#include <drm/drm_probe_helper.h>
+#include <drm/amdgpu_drm.h>
+#include "dm_services.h"
+#include "amdgpu.h"
+#include "amdgpu_dm.h"
+#include "amdgpu_dm_irq.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_dm_trace.h"
+
+unsigned long long dm_get_elapse_time_in_ns(struct dc_context *ctx,
+ unsigned long long current_time_stamp,
+ unsigned long long last_time_stamp)
+{
+ return current_time_stamp - last_time_stamp;
+}
+
+void dm_perf_trace_timestamp(const char *func_name, unsigned int line, struct dc_context *ctx)
+{
+ trace_amdgpu_dc_performance(ctx->perf_trace->read_count,
+ ctx->perf_trace->write_count,
+ &ctx->perf_trace->last_entry_read,
+ &ctx->perf_trace->last_entry_write,
+ func_name, line);
+}
+
+/**** power component interfaces ****/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
new file mode 100644
index 0000000000..0f580ea375
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_trace.h
@@ -0,0 +1,734 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#undef TRACE_SYSTEM +#define TRACE_SYSTEM amdgpu_dm + +#if !defined(_AMDGPU_DM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ) +#define _AMDGPU_DM_TRACE_H_ + +#include <linux/tracepoint.h> +#include <drm/drm_connector.h> +#include <drm/drm_crtc.h> +#include <drm/drm_plane.h> +#include <drm/drm_fourcc.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_encoder.h> +#include <drm/drm_atomic.h> +#include "dcn10/dcn10_optc.h" + +#include "dc/inc/core_types.h" + +DECLARE_EVENT_CLASS(amdgpu_dc_reg_template, + TP_PROTO(unsigned long *count, uint32_t reg, uint32_t value), + TP_ARGS(count, reg, value), + + TP_STRUCT__entry( + __field(uint32_t, reg) + __field(uint32_t, value) + ), + + TP_fast_assign( + __entry->reg = reg; + __entry->value = value; + *count = *count + 1; + ), + + TP_printk("reg=0x%08lx, value=0x%08lx", + (unsigned long)__entry->reg, + (unsigned long)__entry->value) +); + +DEFINE_EVENT(amdgpu_dc_reg_template, amdgpu_dc_rreg, + TP_PROTO(unsigned long *count, uint32_t reg, uint32_t value), + TP_ARGS(count, reg, value)); + +DEFINE_EVENT(amdgpu_dc_reg_template, amdgpu_dc_wreg, + TP_PROTO(unsigned long *count, uint32_t reg, uint32_t value), + TP_ARGS(count, reg, value)); + +TRACE_EVENT(amdgpu_dc_performance, + TP_PROTO(unsigned long read_count, unsigned long write_count, + unsigned long *last_read, unsigned long *last_write, + const char *func, unsigned int line), + TP_ARGS(read_count, write_count, last_read, last_write, func, line), + TP_STRUCT__entry( + __field(uint32_t, reads) + __field(uint32_t, writes) + __field(uint32_t, read_delta) + __field(uint32_t, write_delta) + __string(func, func) + __field(uint32_t, line) + ), + TP_fast_assign( + __entry->reads = read_count; + __entry->writes = write_count; + __entry->read_delta = read_count - *last_read; + __entry->write_delta = write_count - *last_write; + __assign_str(func, func); + __entry->line = line; + *last_read = read_count; + *last_write = write_count; + ), + TP_printk("%s:%d reads=%08ld (%08ld total), writes=%08ld (%08ld total)", + __get_str(func), __entry->line, + (unsigned long)__entry->read_delta, + (unsigned long)__entry->reads, + (unsigned long)__entry->write_delta, + (unsigned long)__entry->writes) +); + +TRACE_EVENT(amdgpu_dm_connector_atomic_check, + TP_PROTO(const struct drm_connector_state *state), + TP_ARGS(state), + + TP_STRUCT__entry( + __field(uint32_t, conn_id) + __field(const struct drm_connector_state *, 
conn_state) + __field(const struct drm_atomic_state *, state) + __field(const struct drm_crtc_commit *, commit) + __field(uint32_t, crtc_id) + __field(uint32_t, best_encoder_id) + __field(enum drm_link_status, link_status) + __field(bool, self_refresh_aware) + __field(enum hdmi_picture_aspect, picture_aspect_ratio) + __field(unsigned int, content_type) + __field(unsigned int, hdcp_content_type) + __field(unsigned int, content_protection) + __field(unsigned int, scaling_mode) + __field(u32, colorspace) + __field(u8, max_requested_bpc) + __field(u8, max_bpc) + ), + + TP_fast_assign( + __entry->conn_id = state->connector->base.id; + __entry->conn_state = state; + __entry->state = state->state; + __entry->commit = state->commit; + __entry->crtc_id = state->crtc ? state->crtc->base.id : 0; + __entry->best_encoder_id = state->best_encoder ? + state->best_encoder->base.id : 0; + __entry->link_status = state->link_status; + __entry->self_refresh_aware = state->self_refresh_aware; + __entry->picture_aspect_ratio = state->picture_aspect_ratio; + __entry->content_type = state->content_type; + __entry->hdcp_content_type = state->hdcp_content_type; + __entry->content_protection = state->content_protection; + __entry->scaling_mode = state->scaling_mode; + __entry->colorspace = state->colorspace; + __entry->max_requested_bpc = state->max_requested_bpc; + __entry->max_bpc = state->max_bpc; + ), + + TP_printk("conn_id=%u conn_state=%p state=%p commit=%p crtc_id=%u " + "best_encoder_id=%u link_status=%d self_refresh_aware=%d " + "picture_aspect_ratio=%d content_type=%u " + "hdcp_content_type=%u content_protection=%u scaling_mode=%u " + "colorspace=%u max_requested_bpc=%u max_bpc=%u", + __entry->conn_id, __entry->conn_state, __entry->state, + __entry->commit, __entry->crtc_id, __entry->best_encoder_id, + __entry->link_status, __entry->self_refresh_aware, + __entry->picture_aspect_ratio, __entry->content_type, + __entry->hdcp_content_type, __entry->content_protection, + __entry->scaling_mode, __entry->colorspace, + __entry->max_requested_bpc, __entry->max_bpc) +); + +TRACE_EVENT(amdgpu_dm_crtc_atomic_check, + TP_PROTO(const struct drm_crtc_state *state), + TP_ARGS(state), + + TP_STRUCT__entry( + __field(const struct drm_atomic_state *, state) + __field(const struct drm_crtc_state *, crtc_state) + __field(const struct drm_crtc_commit *, commit) + __field(uint32_t, crtc_id) + __field(bool, enable) + __field(bool, active) + __field(bool, planes_changed) + __field(bool, mode_changed) + __field(bool, active_changed) + __field(bool, connectors_changed) + __field(bool, zpos_changed) + __field(bool, color_mgmt_changed) + __field(bool, no_vblank) + __field(bool, async_flip) + __field(bool, vrr_enabled) + __field(bool, self_refresh_active) + __field(u32, plane_mask) + __field(u32, connector_mask) + __field(u32, encoder_mask) + ), + + TP_fast_assign( + __entry->state = state->state; + __entry->crtc_state = state; + __entry->crtc_id = state->crtc->base.id; + __entry->commit = state->commit; + __entry->enable = state->enable; + __entry->active = state->active; + __entry->planes_changed = state->planes_changed; + __entry->mode_changed = state->mode_changed; + __entry->active_changed = state->active_changed; + __entry->connectors_changed = state->connectors_changed; + __entry->zpos_changed = state->zpos_changed; + __entry->color_mgmt_changed = state->color_mgmt_changed; + __entry->no_vblank = state->no_vblank; + __entry->async_flip = state->async_flip; + __entry->vrr_enabled = state->vrr_enabled; + 
__entry->self_refresh_active = state->self_refresh_active; + __entry->plane_mask = state->plane_mask; + __entry->connector_mask = state->connector_mask; + __entry->encoder_mask = state->encoder_mask; + ), + + TP_printk("crtc_id=%u crtc_state=%p state=%p commit=%p changed(" + "planes=%d mode=%d active=%d conn=%d zpos=%d color_mgmt=%d) " + "state(enable=%d active=%d async_flip=%d vrr_enabled=%d " + "self_refresh_active=%d no_vblank=%d) mask(plane=%x conn=%x " + "enc=%x)", + __entry->crtc_id, __entry->crtc_state, __entry->state, + __entry->commit, __entry->planes_changed, + __entry->mode_changed, __entry->active_changed, + __entry->connectors_changed, __entry->zpos_changed, + __entry->color_mgmt_changed, __entry->enable, __entry->active, + __entry->async_flip, __entry->vrr_enabled, + __entry->self_refresh_active, __entry->no_vblank, + __entry->plane_mask, __entry->connector_mask, + __entry->encoder_mask) +); + +DECLARE_EVENT_CLASS(amdgpu_dm_plane_state_template, + TP_PROTO(const struct drm_plane_state *state), + TP_ARGS(state), + TP_STRUCT__entry( + __field(uint32_t, plane_id) + __field(enum drm_plane_type, plane_type) + __field(const struct drm_plane_state *, plane_state) + __field(const struct drm_atomic_state *, state) + __field(uint32_t, crtc_id) + __field(uint32_t, fb_id) + __field(uint32_t, fb_format) + __field(uint8_t, fb_planes) + __field(uint64_t, fb_modifier) + __field(const struct dma_fence *, fence) + __field(int32_t, crtc_x) + __field(int32_t, crtc_y) + __field(uint32_t, crtc_w) + __field(uint32_t, crtc_h) + __field(uint32_t, src_x) + __field(uint32_t, src_y) + __field(uint32_t, src_w) + __field(uint32_t, src_h) + __field(u32, alpha) + __field(uint32_t, pixel_blend_mode) + __field(unsigned int, rotation) + __field(unsigned int, zpos) + __field(unsigned int, normalized_zpos) + __field(enum drm_color_encoding, color_encoding) + __field(enum drm_color_range, color_range) + __field(bool, visible) + ), + + TP_fast_assign( + __entry->plane_id = state->plane->base.id; + __entry->plane_type = state->plane->type; + __entry->plane_state = state; + __entry->state = state->state; + __entry->crtc_id = state->crtc ? state->crtc->base.id : 0; + __entry->fb_id = state->fb ? state->fb->base.id : 0; + __entry->fb_format = state->fb ? state->fb->format->format : 0; + __entry->fb_planes = state->fb ? state->fb->format->num_planes : 0; + __entry->fb_modifier = state->fb ? 
state->fb->modifier : 0;
+ __entry->fence = state->fence;
+ __entry->crtc_x = state->crtc_x;
+ __entry->crtc_y = state->crtc_y;
+ __entry->crtc_w = state->crtc_w;
+ __entry->crtc_h = state->crtc_h;
+ __entry->src_x = state->src_x >> 16;
+ __entry->src_y = state->src_y >> 16;
+ __entry->src_w = state->src_w >> 16;
+ __entry->src_h = state->src_h >> 16;
+ __entry->alpha = state->alpha;
+ __entry->pixel_blend_mode = state->pixel_blend_mode;
+ __entry->rotation = state->rotation;
+ __entry->zpos = state->zpos;
+ __entry->normalized_zpos = state->normalized_zpos;
+ __entry->color_encoding = state->color_encoding;
+ __entry->color_range = state->color_range;
+ __entry->visible = state->visible;
+ ),
+
+ TP_printk("plane_id=%u plane_type=%d plane_state=%p state=%p "
+ "crtc_id=%u fb(id=%u fmt=%c%c%c%c planes=%u mod=%llu) "
+ "fence=%p crtc_x=%d crtc_y=%d crtc_w=%u crtc_h=%u "
+ "src_x=%u src_y=%u src_w=%u src_h=%u alpha=%u "
+ "pixel_blend_mode=%u rotation=%u zpos=%u "
+ "normalized_zpos=%u color_encoding=%d color_range=%d "
+ "visible=%d",
+ __entry->plane_id, __entry->plane_type, __entry->plane_state,
+ __entry->state, __entry->crtc_id, __entry->fb_id,
+ (__entry->fb_format & 0xff) ? (__entry->fb_format & 0xff) : 'N',
+ ((__entry->fb_format >> 8) & 0xff) ? ((__entry->fb_format >> 8) & 0xff) : 'O',
+ ((__entry->fb_format >> 16) & 0xff) ? ((__entry->fb_format >> 16) & 0xff) : 'N',
+ ((__entry->fb_format >> 24) & 0x7f) ? ((__entry->fb_format >> 24) & 0x7f) : 'E',
+ __entry->fb_planes,
+ __entry->fb_modifier, __entry->fence, __entry->crtc_x,
+ __entry->crtc_y, __entry->crtc_w, __entry->crtc_h,
+ __entry->src_x, __entry->src_y, __entry->src_w, __entry->src_h,
+ __entry->alpha, __entry->pixel_blend_mode, __entry->rotation,
+ __entry->zpos, __entry->normalized_zpos,
+ __entry->color_encoding, __entry->color_range,
+ __entry->visible)
+);
+
+DEFINE_EVENT(amdgpu_dm_plane_state_template, amdgpu_dm_plane_atomic_check,
+ TP_PROTO(const struct drm_plane_state *state),
+ TP_ARGS(state));
+
+DEFINE_EVENT(amdgpu_dm_plane_state_template, amdgpu_dm_atomic_update_cursor,
+ TP_PROTO(const struct drm_plane_state *state),
+ TP_ARGS(state));
+
+DECLARE_EVENT_CLASS(amdgpu_dm_atomic_state_template,
+ TP_PROTO(const struct drm_atomic_state *state),
+ TP_ARGS(state),
+
+ TP_STRUCT__entry(
+ __field(const struct drm_atomic_state *, state)
+ __field(bool, allow_modeset)
+ __field(bool, legacy_cursor_update)
+ __field(bool, async_update)
+ __field(bool, duplicated)
+ __field(int, num_connector)
+ __field(int, num_private_objs)
+ ),
+
+ TP_fast_assign(
+ __entry->state = state;
+ __entry->allow_modeset = state->allow_modeset;
+ __entry->legacy_cursor_update = state->legacy_cursor_update;
+ __entry->async_update = state->async_update;
+ __entry->duplicated = state->duplicated;
+ __entry->num_connector = state->num_connector;
+ __entry->num_private_objs = state->num_private_objs;
+ ),
+
+ TP_printk("state=%p allow_modeset=%d legacy_cursor_update=%d "
+ "async_update=%d duplicated=%d num_connector=%d "
+ "num_private_objs=%d",
+ __entry->state, __entry->allow_modeset, __entry->legacy_cursor_update,
+ __entry->async_update, __entry->duplicated, __entry->num_connector,
+ __entry->num_private_objs)
+);
+
+DEFINE_EVENT(amdgpu_dm_atomic_state_template, amdgpu_dm_atomic_commit_tail_begin,
+ TP_PROTO(const struct drm_atomic_state *state),
+ TP_ARGS(state));
+
+DEFINE_EVENT(amdgpu_dm_atomic_state_template, amdgpu_dm_atomic_commit_tail_finish,
+ TP_PROTO(const struct drm_atomic_state *state),
+ TP_ARGS(state));
+
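+/*
+ * Usage sketch (illustrative only): each DEFINE_EVENT above generates a
+ * trace_<event_name>() helper, so the atomic commit path in amdgpu_dm.c can
+ * bracket its work with calls such as:
+ *
+ *	trace_amdgpu_dm_atomic_commit_tail_begin(state);
+ *	...program the hardware...
+ *	trace_amdgpu_dm_atomic_commit_tail_finish(state);
+ *
+ * where state is the struct drm_atomic_state being committed.
+ */
+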
+DEFINE_EVENT(amdgpu_dm_atomic_state_template, amdgpu_dm_atomic_check_begin, + TP_PROTO(const struct drm_atomic_state *state), + TP_ARGS(state)); + +TRACE_EVENT(amdgpu_dm_atomic_check_finish, + TP_PROTO(const struct drm_atomic_state *state, int res), + TP_ARGS(state, res), + + TP_STRUCT__entry( + __field(const struct drm_atomic_state *, state) + __field(int, res) + __field(bool, async_update) + __field(bool, allow_modeset) + ), + + TP_fast_assign( + __entry->state = state; + __entry->res = res; + __entry->async_update = state->async_update; + __entry->allow_modeset = state->allow_modeset; + ), + + TP_printk("state=%p res=%d async_update=%d allow_modeset=%d", + __entry->state, __entry->res, + __entry->async_update, __entry->allow_modeset) +); + +TRACE_EVENT(amdgpu_dm_dc_pipe_state, + TP_PROTO(int pipe_idx, const struct dc_plane_state *plane_state, + const struct dc_stream_state *stream, + const struct plane_resource *plane_res, + int update_flags), + TP_ARGS(pipe_idx, plane_state, stream, plane_res, update_flags), + + TP_STRUCT__entry( + __field(int, pipe_idx) + __field(const void *, stream) + __field(int, stream_w) + __field(int, stream_h) + __field(int, dst_x) + __field(int, dst_y) + __field(int, dst_w) + __field(int, dst_h) + __field(int, src_x) + __field(int, src_y) + __field(int, src_w) + __field(int, src_h) + __field(int, clip_x) + __field(int, clip_y) + __field(int, clip_w) + __field(int, clip_h) + __field(int, recout_x) + __field(int, recout_y) + __field(int, recout_w) + __field(int, recout_h) + __field(int, viewport_x) + __field(int, viewport_y) + __field(int, viewport_w) + __field(int, viewport_h) + __field(int, flip_immediate) + __field(int, surface_pitch) + __field(int, format) + __field(int, swizzle) + __field(unsigned int, update_flags) + ), + + TP_fast_assign( + __entry->pipe_idx = pipe_idx; + __entry->stream = stream; + __entry->stream_w = stream->timing.h_addressable; + __entry->stream_h = stream->timing.v_addressable; + __entry->dst_x = plane_state->dst_rect.x; + __entry->dst_y = plane_state->dst_rect.y; + __entry->dst_w = plane_state->dst_rect.width; + __entry->dst_h = plane_state->dst_rect.height; + __entry->src_x = plane_state->src_rect.x; + __entry->src_y = plane_state->src_rect.y; + __entry->src_w = plane_state->src_rect.width; + __entry->src_h = plane_state->src_rect.height; + __entry->clip_x = plane_state->clip_rect.x; + __entry->clip_y = plane_state->clip_rect.y; + __entry->clip_w = plane_state->clip_rect.width; + __entry->clip_h = plane_state->clip_rect.height; + __entry->recout_x = plane_res->scl_data.recout.x; + __entry->recout_y = plane_res->scl_data.recout.y; + __entry->recout_w = plane_res->scl_data.recout.width; + __entry->recout_h = plane_res->scl_data.recout.height; + __entry->viewport_x = plane_res->scl_data.viewport.x; + __entry->viewport_y = plane_res->scl_data.viewport.y; + __entry->viewport_w = plane_res->scl_data.viewport.width; + __entry->viewport_h = plane_res->scl_data.viewport.height; + __entry->flip_immediate = plane_state->flip_immediate; + __entry->surface_pitch = plane_state->plane_size.surface_pitch; + __entry->format = plane_state->format; + __entry->swizzle = plane_state->tiling_info.gfx9.swizzle; + __entry->update_flags = update_flags; + ), + TP_printk("pipe_idx=%d stream=%p rct(%d,%d) dst=(%d,%d,%d,%d) " + "src=(%d,%d,%d,%d) clip=(%d,%d,%d,%d) recout=(%d,%d,%d,%d) " + "viewport=(%d,%d,%d,%d) flip_immediate=%d pitch=%d " + "format=%d swizzle=%d update_flags=%x", + __entry->pipe_idx, + __entry->stream, + __entry->stream_w, + 
__entry->stream_h,
+ __entry->dst_x,
+ __entry->dst_y,
+ __entry->dst_w,
+ __entry->dst_h,
+ __entry->src_x,
+ __entry->src_y,
+ __entry->src_w,
+ __entry->src_h,
+ __entry->clip_x,
+ __entry->clip_y,
+ __entry->clip_w,
+ __entry->clip_h,
+ __entry->recout_x,
+ __entry->recout_y,
+ __entry->recout_w,
+ __entry->recout_h,
+ __entry->viewport_x,
+ __entry->viewport_y,
+ __entry->viewport_w,
+ __entry->viewport_h,
+ __entry->flip_immediate,
+ __entry->surface_pitch,
+ __entry->format,
+ __entry->swizzle,
+ __entry->update_flags
+ )
+);
+
+TRACE_EVENT(amdgpu_dm_dc_clocks_state,
+ TP_PROTO(const struct dc_clocks *clk),
+ TP_ARGS(clk),
+
+ TP_STRUCT__entry(
+ __field(int, dispclk_khz)
+ __field(int, dppclk_khz)
+ __field(int, dcfclk_khz)
+ __field(int, socclk_khz)
+ __field(int, dcfclk_deep_sleep_khz)
+ __field(int, fclk_khz)
+ __field(int, phyclk_khz)
+ __field(int, dramclk_khz)
+ __field(int, p_state_change_support)
+ __field(int, prev_p_state_change_support)
+ __field(int, pwr_state)
+ __field(int, dtm_level)
+ __field(int, max_supported_dppclk_khz)
+ __field(int, max_supported_dispclk_khz)
+ __field(int, bw_dppclk_khz)
+ __field(int, bw_dispclk_khz)
+ ),
+ TP_fast_assign(
+ __entry->dispclk_khz = clk->dispclk_khz;
+ __entry->dppclk_khz = clk->dppclk_khz;
+ __entry->dcfclk_khz = clk->dcfclk_khz;
+ __entry->socclk_khz = clk->socclk_khz;
+ __entry->dcfclk_deep_sleep_khz = clk->dcfclk_deep_sleep_khz;
+ __entry->fclk_khz = clk->fclk_khz;
+ __entry->phyclk_khz = clk->phyclk_khz;
+ __entry->dramclk_khz = clk->dramclk_khz;
+ __entry->p_state_change_support = clk->p_state_change_support;
+ __entry->prev_p_state_change_support = clk->prev_p_state_change_support;
+ __entry->pwr_state = clk->pwr_state;
+ __entry->dtm_level = clk->dtm_level;
+ __entry->max_supported_dppclk_khz = clk->max_supported_dppclk_khz;
+ __entry->max_supported_dispclk_khz = clk->max_supported_dispclk_khz;
+ __entry->bw_dppclk_khz = clk->bw_dppclk_khz;
+ __entry->bw_dispclk_khz = clk->bw_dispclk_khz;
+ ),
+ TP_printk("dispclk_khz=%d dppclk_khz=%d dcfclk_khz=%d socclk_khz=%d "
+ "dcfclk_deep_sleep_khz=%d fclk_khz=%d phyclk_khz=%d "
+ "dramclk_khz=%d p_state_change_support=%d "
+ "prev_p_state_change_support=%d pwr_state=%d "
+ "dtm_level=%d max_supported_dppclk_khz=%d max_supported_dispclk_khz=%d "
+ "bw_dppclk_khz=%d bw_dispclk_khz=%d",
+ __entry->dispclk_khz,
+ __entry->dppclk_khz,
+ __entry->dcfclk_khz,
+ __entry->socclk_khz,
+ __entry->dcfclk_deep_sleep_khz,
+ __entry->fclk_khz,
+ __entry->phyclk_khz,
+ __entry->dramclk_khz,
+ __entry->p_state_change_support,
+ __entry->prev_p_state_change_support,
+ __entry->pwr_state,
+ __entry->dtm_level,
+ __entry->max_supported_dppclk_khz,
+ __entry->max_supported_dispclk_khz,
+ __entry->bw_dppclk_khz,
+ __entry->bw_dispclk_khz
+ )
+);
+
+TRACE_EVENT(amdgpu_dm_dce_clocks_state,
+ TP_PROTO(const struct dce_bw_output *clk),
+ TP_ARGS(clk),
+
+ TP_STRUCT__entry(
+ __field(bool, cpuc_state_change_enable)
+ __field(bool, cpup_state_change_enable)
+ __field(bool, stutter_mode_enable)
+ __field(bool, nbp_state_change_enable)
+ __field(bool, all_displays_in_sync)
+ __field(int, sclk_khz)
+ __field(int, sclk_deep_sleep_khz)
+ __field(int, yclk_khz)
+ __field(int, dispclk_khz)
+ __field(int, blackout_recovery_time_us)
+ ),
+ TP_fast_assign(
__entry->cpuc_state_change_enable = clk->cpuc_state_change_enable;
+ __entry->cpup_state_change_enable = clk->cpup_state_change_enable;
+ __entry->stutter_mode_enable = clk->stutter_mode_enable;
+ __entry->nbp_state_change_enable = clk->nbp_state_change_enable;
+ __entry->all_displays_in_sync = clk->all_displays_in_sync;
+ __entry->sclk_khz = clk->sclk_khz;
+ __entry->sclk_deep_sleep_khz = clk->sclk_deep_sleep_khz;
+ __entry->yclk_khz = clk->yclk_khz;
+ __entry->dispclk_khz = clk->dispclk_khz;
+ __entry->blackout_recovery_time_us = clk->blackout_recovery_time_us;
+ ),
+ TP_printk("cpuc_state_change_enable=%d cpup_state_change_enable=%d stutter_mode_enable=%d "
+ "nbp_state_change_enable=%d all_displays_in_sync=%d sclk_khz=%d sclk_deep_sleep_khz=%d "
+ "yclk_khz=%d dispclk_khz=%d blackout_recovery_time_us=%d",
+ __entry->cpuc_state_change_enable,
+ __entry->cpup_state_change_enable,
+ __entry->stutter_mode_enable,
+ __entry->nbp_state_change_enable,
+ __entry->all_displays_in_sync,
+ __entry->sclk_khz,
+ __entry->sclk_deep_sleep_khz,
+ __entry->yclk_khz,
+ __entry->dispclk_khz,
+ __entry->blackout_recovery_time_us
+ )
+);
+
+TRACE_EVENT(amdgpu_dmub_trace_high_irq,
+ TP_PROTO(uint32_t trace_code, uint32_t tick_count, uint32_t param0,
+ uint32_t param1),
+ TP_ARGS(trace_code, tick_count, param0, param1),
+ TP_STRUCT__entry(
+ __field(uint32_t, trace_code)
+ __field(uint32_t, tick_count)
+ __field(uint32_t, param0)
+ __field(uint32_t, param1)
+ ),
+ TP_fast_assign(
+ __entry->trace_code = trace_code;
+ __entry->tick_count = tick_count;
+ __entry->param0 = param0;
+ __entry->param1 = param1;
+ ),
+ TP_printk("trace_code=%u tick_count=%u param0=%u param1=%u",
+ __entry->trace_code, __entry->tick_count,
+ __entry->param0, __entry->param1)
+);
+
+TRACE_EVENT(amdgpu_refresh_rate_track,
+ TP_PROTO(int crtc_index, ktime_t refresh_rate_ns, uint32_t refresh_rate_hz),
+ TP_ARGS(crtc_index, refresh_rate_ns, refresh_rate_hz),
+ TP_STRUCT__entry(
+ __field(int, crtc_index)
+ __field(ktime_t, refresh_rate_ns)
+ __field(uint32_t, refresh_rate_hz)
+ ),
+ TP_fast_assign(
+ __entry->crtc_index = crtc_index;
+ __entry->refresh_rate_ns = refresh_rate_ns;
+ __entry->refresh_rate_hz = refresh_rate_hz;
+ ),
+ TP_printk("crtc_index=%d refresh_rate=%dHz (%lld)",
+ __entry->crtc_index,
+ __entry->refresh_rate_hz,
+ __entry->refresh_rate_ns)
+);
+
+TRACE_EVENT(dcn_fpu,
+ TP_PROTO(bool begin, const char *function, const int line, const int recursion_depth),
+ TP_ARGS(begin, function, line, recursion_depth),
+
+ TP_STRUCT__entry(
+ __field(bool, begin)
+ __field(const char *, function)
+ __field(int, line)
+ __field(int, recursion_depth)
+ ),
+ TP_fast_assign(
+ __entry->begin = begin;
+ __entry->function = function;
+ __entry->line = line;
+ __entry->recursion_depth = recursion_depth;
+ ),
+ TP_printk("%s: recursion_depth: %d: %s()+%d:",
+ __entry->begin ? "begin" : "end",
+ __entry->recursion_depth,
+ __entry->function,
+ __entry->line
+ )
+);
+
+TRACE_EVENT(dcn_optc_lock_unlock_state,
+ TP_PROTO(const struct optc *optc_state, int instance, bool lock, const char *function, const int line),
+ TP_ARGS(optc_state, instance, lock, function, line),
+
+ TP_STRUCT__entry(
+ __field(const char *, function)
+ __field(int, instance)
+ __field(bool, lock)
+ __field(int, line)
+ __field(int, opp_count)
+ __field(int, max_h_total)
+ __field(int, max_v_total)
+ __field(int, min_h_blank)
+ __field(int, min_h_sync_width)
+ __field(int, min_v_sync_width)
+ __field(int, min_v_blank)
+ __field(int, min_v_blank_interlace)
+ __field(int, vstartup_start)
+ __field(int, vupdate_offset)
+ __field(int, vupdate_width)
+ __field(int, vready_offset)
+ ),
+ TP_fast_assign(
+ __entry->function = function;
+ __entry->instance = instance;
+ __entry->lock = lock;
+ __entry->line = line;
+ __entry->opp_count = optc_state->opp_count;
+ __entry->max_h_total = optc_state->max_h_total;
+ __entry->max_v_total = optc_state->max_v_total;
+ __entry->min_h_blank = optc_state->min_h_blank;
+ __entry->min_h_sync_width = optc_state->min_h_sync_width;
+ __entry->min_v_sync_width = optc_state->min_v_sync_width;
+ __entry->min_v_blank = optc_state->min_v_blank;
+ __entry->min_v_blank_interlace = optc_state->min_v_blank_interlace;
+ __entry->vstartup_start = optc_state->vstartup_start;
+ __entry->vupdate_offset = optc_state->vupdate_offset;
+ __entry->vupdate_width = optc_state->vupdate_width;
+ __entry->vready_offset = optc_state->vready_offset;
+ ),
+ TP_printk("%s: %s()+%d: optc_instance=%d opp_count=%d max_h_total=%d max_v_total=%d "
+ "min_h_blank=%d min_h_sync_width=%d min_v_sync_width=%d min_v_blank=%d "
+ "min_v_blank_interlace=%d vstartup_start=%d vupdate_offset=%d vupdate_width=%d "
+ "vready_offset=%d",
+ __entry->lock ? "Lock" : "Unlock",
+ __entry->function,
+ __entry->line,
+ __entry->instance,
+ __entry->opp_count,
+ __entry->max_h_total,
+ __entry->max_v_total,
+ __entry->min_h_blank,
+ __entry->min_h_sync_width,
+ __entry->min_v_sync_width,
+ __entry->min_v_blank,
+ __entry->min_v_blank_interlace,
+ __entry->vstartup_start,
+ __entry->vupdate_offset,
+ __entry->vupdate_width,
+ __entry->vready_offset
+ )
+);
+
+#endif /* _AMDGPU_DM_TRACE_H_ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#define TRACE_INCLUDE_FILE amdgpu_dm_trace
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
new file mode 100644
index 0000000000..172aa10a88
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c
@@ -0,0 +1,154 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dc_trace.h"
+
+#if defined(CONFIG_X86)
+#include <asm/fpu/api.h>
+#elif defined(CONFIG_PPC64)
+#include <asm/switch_to.h>
+#include <asm/cputable.h>
+#elif defined(CONFIG_ARM64)
+#include <asm/neon.h>
+#elif defined(CONFIG_LOONGARCH)
+#include <asm/fpu.h>
+#endif
+
+/**
+ * DOC: DC FPU manipulation overview
+ *
+ * The DC core uses FPU operations in multiple parts of the code, which
+ * requires a dedicated way to manage entry into those regions. To fulfill
+ * this requirement, this file provides wrapper functions that encapsulate
+ * kernel_fpu_begin/end (and the per-architecture equivalents) to better fit
+ * the needs of the display component. In summary, this file contains the
+ * functions related to FPU operation management.
+ */
+
+static DEFINE_PER_CPU(int, fpu_recursion_depth);
+
+/**
+ * dc_assert_fp_enabled - Check if FPU protection is enabled
+ *
+ * This function checks whether the code is already running under FPU
+ * protection. A function that works as an API for a set of FPU operations
+ * can use it to verify that the caller invoked it after DC_FP_START(). For
+ * an example, take a look at the dcn20_fpu.c file.
+ */
+inline void dc_assert_fp_enabled(void)
+{
+ int *pcpu, depth = 0;
+
+ pcpu = get_cpu_ptr(&fpu_recursion_depth);
+ depth = *pcpu;
+ put_cpu_ptr(&fpu_recursion_depth);
+
+ ASSERT(depth >= 1);
+}
+
+/**
+ * dc_fpu_begin - Enables FPU protection
+ * @function_name: A string containing the function name for debug purposes
+ * (usually __func__)
+ *
+ * @line: The line number where DC_FP_START was invoked, for debug purposes
+ * (usually __LINE__)
+ *
+ * This function is responsible for managing the use of kernel_fpu_begin() with
+ * the advantage of providing an event trace for debugging.
+ *
+ * Note: Do not call this function directly; always use DC_FP_START().
+ */
+void dc_fpu_begin(const char *function_name, const int line)
+{
+ int *pcpu;
+
+ pcpu = get_cpu_ptr(&fpu_recursion_depth);
+ *pcpu += 1;
+
+ if (*pcpu == 1) {
+#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
+ migrate_disable();
+ kernel_fpu_begin();
+#elif defined(CONFIG_PPC64)
+ if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
+ preempt_disable();
+ enable_kernel_vsx();
+ } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
+ preempt_disable();
+ enable_kernel_altivec();
+ } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
+ preempt_disable();
+ enable_kernel_fp();
+ }
+#elif defined(CONFIG_ARM64)
+ kernel_neon_begin();
+#endif
+ }
+
+ TRACE_DCN_FPU(true, function_name, line, *pcpu);
+ put_cpu_ptr(&fpu_recursion_depth);
+}
+
+/**
+ * dc_fpu_end - Disable FPU protection
+ * @function_name: A string containing the function name for debug purposes
+ * (usually __func__)
+ * @line: The line number where DC_FP_END was invoked, for debug purposes
+ * (usually __LINE__)
+ *
+ * This function is responsible for managing the use of kernel_fpu_end() with
+ * the advantage of providing an event trace for debugging.
+ *
+ * Note: Do not call this function directly; always use DC_FP_END().
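+ *
+ * A minimal pairing, as a sketch (the DC_FP_START()/DC_FP_END() macros
+ * expand to dc_fpu_begin()/dc_fpu_end() with __func__ and __LINE__; real
+ * call sites live in the DML/DCN math code):
+ *
+ *	DC_FP_START();
+ *	...floating-point heavy calculations...
+ *	DC_FP_END();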
+ */
+void dc_fpu_end(const char *function_name, const int line)
+{
+ int *pcpu;
+
+ pcpu = get_cpu_ptr(&fpu_recursion_depth);
+ *pcpu -= 1;
+ if (*pcpu <= 0) {
+#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
+ kernel_fpu_end();
+ migrate_enable();
+#elif defined(CONFIG_PPC64)
+ if (cpu_has_feature(CPU_FTR_VSX_COMP)) {
+ disable_kernel_vsx();
+ preempt_enable();
+ } else if (cpu_has_feature(CPU_FTR_ALTIVEC_COMP)) {
+ disable_kernel_altivec();
+ preempt_enable();
+ } else if (!cpu_has_feature(CPU_FTR_FPU_UNAVAILABLE)) {
+ disable_kernel_fp();
+ preempt_enable();
+ }
+#elif defined(CONFIG_ARM64)
+ kernel_neon_end();
+#endif
+ }
+
+ TRACE_DCN_FPU(false, function_name, line, *pcpu);
+ put_cpu_ptr(&fpu_recursion_depth);
+}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.h b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.h
new file mode 100644
index 0000000000..b8275b3979
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DC_FPU_H__
+#define __DC_FPU_H__
+
+void dc_assert_fp_enabled(void);
+void dc_fpu_begin(const char *function_name, const int line);
+void dc_fpu_end(const char *function_name, const int line);
+
+#endif /* __DC_FPU_H__ */