Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 58
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_state.c | 67
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_stream.c | 90
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c | 49
-rw-r--r--  drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/link_dpms.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c | 9
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 57
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c | 14
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 36
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 16
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c | 5
-rw-r--r--  drivers/gpu/drm/display/drm_dp_mst_topology.c | 11
-rw-r--r--  drivers/gpu/drm/drm_atomic_uapi.c | 15
-rw-r--r--  drivers/gpu/drm/drm_client_modeset.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_backlight.c | 3
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pps.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 55
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 13
-rw-r--r--  drivers/gpu/drm/lima/lima_drv.c | 1
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_i2c.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/pptable.h | 2
-rw-r--r--  drivers/gpu/drm/tests/drm_gem_shmem_test.c | 11
-rw-r--r--  drivers/gpu/drm/xe/regs/xe_engine_regs.h | 4
-rw-r--r--  drivers/gpu/drm/xe/xe_guc_submit.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_hwmon.c | 3
-rw-r--r--  drivers/gpu/drm/xe/xe_lrc.c | 17
-rw-r--r--  drivers/gpu/drm/xe/xe_preempt_fence.c | 14
-rw-r--r--  drivers/gpu/drm/xe/xe_rtp.c | 2
-rw-r--r--  drivers/gpu/drm/xe/xe_sync.c | 2
47 files changed, 474 insertions(+), 226 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index ee7df1d84e..89cf9ac6da 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -4048,6 +4048,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
mutex_init(&adev->grbm_idx_mutex);
mutex_init(&adev->mn_lock);
mutex_init(&adev->virt.vf_errors.lock);
+ mutex_init(&adev->virt.rlcg_reg_lock);
hash_init(adev->mn_hash);
mutex_init(&adev->psp.mutex);
mutex_init(&adev->notifier_lock);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index e4742b6503..4a9cec0026 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -262,9 +262,8 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
struct dma_fence *fence = NULL;
int r;
- /* Ignore soft recovered fences here */
r = drm_sched_entity_error(s_entity);
- if (r && r != -ENODATA)
+ if (r)
goto error;
if (!fence && job->gang_submit)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
index ca5c86e5f7..8e8afbd237 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp_ta.c
@@ -334,7 +334,7 @@ static ssize_t ta_if_invoke_debugfs_write(struct file *fp, const char *buf, size
set_ta_context_funcs(psp, ta_type, &context);
- if (!context->initialized) {
+ if (!context || !context->initialized) {
dev_err(adev->dev, "TA is not initialized\n");
ret = -EINVAL;
goto err_free_shared_buf;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index 1adc81a557..0c4ee06451 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -2172,12 +2172,15 @@ static void amdgpu_ras_interrupt_process_handler(struct work_struct *work)
int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev,
struct ras_dispatch_if *info)
{
- struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
- struct ras_ih_data *data = &obj->ih_data;
+ struct ras_manager *obj;
+ struct ras_ih_data *data;
+ obj = amdgpu_ras_find_obj(adev, &info->head);
if (!obj)
return -EINVAL;
+ data = &obj->ih_data;
+
if (data->inuse == 0)
return 0;
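The amdgpu_ras.c hunk above fixes an initializer-order bug: the original code computed &obj->ih_data in the declaration, before obj had been checked against NULL. The shape of the fix, as a minimal sketch with hypothetical types (not the driver's own code):

    struct payload { int inuse; };
    struct obj { struct payload data; };
    struct obj *find_obj(int key);       /* hypothetical lookup; may return NULL */

    int dispatch(int key)
    {
            struct obj *o = find_obj(key);
            struct payload *p;

            if (!o)
                    return -EINVAL;      /* reject before any member access */

            p = &o->data;                /* only now is the member address safe */
            if (p->inuse == 0)
                    return 0;
            /* ... dispatch via p ... */
            return 0;
    }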
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
index 54ab51a4ad..972a58f0f4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c
@@ -980,6 +980,9 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1;
scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2;
scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3;
+
+ mutex_lock(&adev->virt.rlcg_reg_lock);
+
if (reg_access_ctrl->spare_int)
spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int;
@@ -1036,6 +1039,9 @@ u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 f
}
ret = readl(scratch_reg0);
+
+ mutex_unlock(&adev->virt.rlcg_reg_lock);
+
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index 642f1fd287..0ec246c745 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -272,6 +272,8 @@ struct amdgpu_virt {
/* the ucode id to signal the autoload */
uint32_t autoload_ucode_id;
+
+ struct mutex rlcg_reg_lock;
};
struct amdgpu_video_codec_info;
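The amdgpu_device.c, amdgpu_virt.c and amdgpu_virt.h hunks above work as one change: the header declares rlcg_reg_lock, device init initializes it, and amdgpu_virt_rlcg_reg_rw() now holds it across the whole scratch-register handshake, so two callers can no longer interleave their writes to the shared scratch registers. A reduced sketch of the pattern (collapsed to two scratch registers, hypothetical names, not the driver's exact sequence):

    static DEFINE_MUTEX(rlcg_reg_lock);

    static u32 rlcg_reg_rw(void __iomem *scratch_data, void __iomem *scratch_cmd,
                           u32 offset, u32 value)
    {
            u32 ret;

            mutex_lock(&rlcg_reg_lock);      /* one handshake in flight at a time */
            writel(value, scratch_data);     /* stage the value for the RLC firmware */
            writel(offset, scratch_cmd);     /* kick off the indirect access */
            ret = readl(scratch_data);       /* read back the result */
            mutex_unlock(&rlcg_reg_lock);

            return ret;
    }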
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 66e8a01612..9b748d7058 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -102,6 +102,11 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
if (!r)
r = amdgpu_sync_push_to_job(&sync, p->job);
amdgpu_sync_free(&sync);
+
+ if (r) {
+ p->num_dw_left = 0;
+ amdgpu_job_free(p->job);
+ }
return r;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 31e500859a..9248525124 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -1658,7 +1658,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
start = map_start << PAGE_SHIFT;
end = (map_last + 1) << PAGE_SHIFT;
for (addr = start; !r && addr < end; ) {
- struct hmm_range *hmm_range;
+ struct hmm_range *hmm_range = NULL;
unsigned long map_start_vma;
unsigned long map_last_vma;
struct vm_area_struct *vma;
@@ -1696,7 +1696,12 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
}
svm_range_lock(prange);
- if (!r && amdgpu_hmm_range_get_pages_done(hmm_range)) {
+
+ /* Free backing memory of hmm_range if it was initialized
+ * Override return value to TRY AGAIN only if prior returns
+ * were successful
+ */
+ if (hmm_range && amdgpu_hmm_range_get_pages_done(hmm_range) && !r) {
pr_debug("hmm update the range, need validate again\n");
r = -EAGAIN;
}
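The kfd_svm.c change pairs a NULL initialization with a guarded cleanup: hmm_range is only assigned when acquisition succeeds, the done-callback (which frees the backing memory) must still run even after a later failure, and -EAGAIN may only replace a successful return. The underlying idiom, sketched with hypothetical helpers:

    struct range;
    int acquire_range(struct range **out);    /* hypothetical; leaves *out untouched on failure */
    bool range_was_updated(struct range *r);  /* hypothetical; also frees the backing memory */

    int validate_once(void)
    {
            struct range *r = NULL;           /* NULL until acquisition succeeds */
            int err = acquire_range(&r);

            /* ... map pages, possibly updating err ... */

            /* free only what was acquired; don't let -EAGAIN mask a real error */
            if (r && range_was_updated(r) && !err)
                    err = -EAGAIN;
            return err;
    }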
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 3cdcadd41b..836bf9ba62 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2701,7 +2701,8 @@ static int dm_suspend(void *handle)
dm->cached_dc_state = dc_state_create_copy(dm->dc->current_state);
- dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
+ if (dm->cached_dc_state)
+ dm_gpureset_toggle_interrupts(adev, dm->cached_dc_state, false);
amdgpu_dm_commit_zero_streams(dm->dc);
@@ -2943,6 +2944,7 @@ static int dm_resume(void *handle)
commit_params.streams = dc_state->streams;
commit_params.stream_count = dc_state->stream_count;
+ dc_exit_ips_for_hw_access(dm->dc);
WARN_ON(!dc_commit_streams(dm->dc, &commit_params));
dm_gpureset_commit_state(dm->cached_dc_state, dm);
@@ -3015,7 +3017,8 @@ static int dm_resume(void *handle)
emulated_link_detect(aconnector->dc_link);
} else {
mutex_lock(&dm->dc_lock);
- dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
+ dc_exit_ips_for_hw_access(dm->dc);
+ dc_link_detect(aconnector->dc_link, DETECT_REASON_RESUMEFROMS3S4);
mutex_unlock(&dm->dc_lock);
}
@@ -3351,6 +3354,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
enum dc_connection_type new_connection_type = dc_connection_none;
struct amdgpu_device *adev = drm_to_adev(dev);
struct dm_connector_state *dm_con_state = to_dm_connector_state(connector->state);
+ struct dc *dc = aconnector->dc_link->ctx->dc;
bool ret = false;
if (adev->dm.disable_hpd_irq)
@@ -3385,6 +3389,7 @@ static void handle_hpd_irq_helper(struct amdgpu_dm_connector *aconnector)
drm_kms_helper_connector_hotplug_event(connector);
} else {
mutex_lock(&adev->dm.dc_lock);
+ dc_exit_ips_for_hw_access(dc);
ret = dc_link_detect(aconnector->dc_link, DETECT_REASON_HPD);
mutex_unlock(&adev->dm.dc_lock);
if (ret) {
@@ -3444,6 +3449,7 @@ static void handle_hpd_rx_irq(void *param)
bool has_left_work = false;
int idx = dc_link->link_index;
struct hpd_rx_irq_offload_work_queue *offload_wq = &adev->dm.hpd_rx_offload_wq[idx];
+ struct dc *dc = aconnector->dc_link->ctx->dc;
memset(&hpd_irq_data, 0, sizeof(hpd_irq_data));
@@ -3533,6 +3539,7 @@ out:
bool ret = false;
mutex_lock(&adev->dm.dc_lock);
+ dc_exit_ips_for_hw_access(dc);
ret = dc_link_detect(dc_link, DETECT_REASON_HPDRX);
mutex_unlock(&adev->dm.dc_lock);
@@ -4639,6 +4646,7 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
bool ret = false;
mutex_lock(&dm->dc_lock);
+ dc_exit_ips_for_hw_access(dm->dc);
ret = dc_link_detect(link, DETECT_REASON_BOOT);
mutex_unlock(&dm->dc_lock);
@@ -6788,7 +6796,8 @@ static void create_eml_sink(struct amdgpu_dm_connector *aconnector)
aconnector->dc_sink = aconnector->dc_link->local_sink ?
aconnector->dc_link->local_sink :
aconnector->dc_em_sink;
- dc_sink_retain(aconnector->dc_sink);
+ if (aconnector->dc_sink)
+ dc_sink_retain(aconnector->dc_sink);
}
}
@@ -7615,7 +7624,8 @@ static int amdgpu_dm_connector_get_modes(struct drm_connector *connector)
drm_add_modes_noedid(connector, 1920, 1080);
} else {
amdgpu_dm_connector_ddc_get_modes(connector, edid);
- amdgpu_dm_connector_add_common_modes(encoder, connector);
+ if (encoder)
+ amdgpu_dm_connector_add_common_modes(encoder, connector);
amdgpu_dm_connector_add_freesync_modes(connector, edid);
}
amdgpu_dm_fbc_init(connector);
@@ -8945,7 +8955,8 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
memset(&position, 0, sizeof(position));
mutex_lock(&dm->dc_lock);
- dc_stream_set_cursor_position(dm_old_crtc_state->stream, &position);
+ dc_exit_ips_for_hw_access(dm->dc);
+ dc_stream_program_cursor_position(dm_old_crtc_state->stream, &position);
mutex_unlock(&dm->dc_lock);
}
@@ -9014,6 +9025,7 @@ static void amdgpu_dm_commit_streams(struct drm_atomic_state *state,
dm_enable_per_frame_crtc_master_sync(dc_state);
mutex_lock(&dm->dc_lock);
+ dc_exit_ips_for_hw_access(dm->dc);
WARN_ON(!dc_commit_streams(dm->dc, &params));
/* Allow idle optimization when vblank count is 0 for display off */
@@ -9379,6 +9391,7 @@ static void amdgpu_dm_atomic_commit_tail(struct drm_atomic_state *state)
mutex_lock(&dm->dc_lock);
+ dc_exit_ips_for_hw_access(dm->dc);
dc_update_planes_and_stream(dm->dc,
dummy_updates,
status->plane_count,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index a5e1a93dda..b50010ed76 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -182,6 +182,8 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
dc_sink_release(dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
+ aconnector->dsc_aux = NULL;
+ port->passthrough_aux = NULL;
}
aconnector->mst_status = MST_STATUS_DEFAULT;
@@ -494,6 +496,8 @@ dm_dp_mst_detect(struct drm_connector *connector,
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
aconnector->edid = NULL;
+ aconnector->dsc_aux = NULL;
+ port->passthrough_aux = NULL;
amdgpu_dm_set_mst_status(&aconnector->mst_status,
MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
@@ -1233,14 +1237,6 @@ static bool is_dsc_need_re_compute(
if (!aconnector || !aconnector->dsc_aux)
continue;
- /*
- * check if cached virtual MST DSC caps are available and DSC is supported
- * as per specifications in their Virtual DPCD registers.
- */
- if (!(aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported ||
- aconnector->dc_link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_PASSTHROUGH_SUPPORT))
- continue;
-
stream_on_link[new_stream_on_link_num] = aconnector;
new_stream_on_link_num++;
@@ -1268,6 +1264,9 @@ static bool is_dsc_need_re_compute(
}
}
+ if (new_stream_on_link_num == 0)
+ return false;
+
/* check current_state if there is a stream on the link but it is not in
* new request state
*/
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
index 8a4c40b4c2..311c62d2d1 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_plane.c
@@ -1254,7 +1254,7 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
/* turn off cursor */
if (crtc_state && crtc_state->stream) {
mutex_lock(&adev->dm.dc_lock);
- dc_stream_set_cursor_position(crtc_state->stream,
+ dc_stream_program_cursor_position(crtc_state->stream,
&position);
mutex_unlock(&adev->dm.dc_lock);
}
@@ -1284,11 +1284,11 @@ void amdgpu_dm_plane_handle_cursor_update(struct drm_plane *plane,
if (crtc_state->stream) {
mutex_lock(&adev->dm.dc_lock);
- if (!dc_stream_set_cursor_attributes(crtc_state->stream,
+ if (!dc_stream_program_cursor_attributes(crtc_state->stream,
&attributes))
DRM_ERROR("DC failed to set cursor attributes\n");
- if (!dc_stream_set_cursor_position(crtc_state->stream,
+ if (!dc_stream_program_cursor_position(crtc_state->stream,
&position))
DRM_ERROR("DC failed to set cursor position\n");
mutex_unlock(&adev->dm.dc_lock);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 15819416a2..8ed5993246 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -2267,6 +2267,10 @@ void resource_log_pipe_topology_update(struct dc *dc, struct dc_state *state)
otg_master = resource_get_otg_master_for_stream(
&state->res_ctx, state->streams[stream_idx]);
+
+ if (!otg_master)
+ continue;
+
resource_log_pipe_for_stream(dc, state, otg_master, stream_idx);
}
if (state->phantom_stream_count > 0) {
@@ -2508,6 +2512,17 @@ static void remove_hpo_dp_link_enc_from_ctx(struct resource_context *res_ctx,
}
}
+static int get_num_of_free_pipes(const struct resource_pool *pool, const struct dc_state *context)
+{
+ int i;
+ int count = 0;
+
+ for (i = 0; i < pool->pipe_count; i++)
+ if (resource_is_pipe_type(&context->res_ctx.pipe_ctx[i], FREE_PIPE))
+ count++;
+ return count;
+}
+
enum dc_status resource_add_otg_master_for_stream_output(struct dc_state *new_ctx,
const struct resource_pool *pool,
struct dc_stream_state *stream)
@@ -2641,37 +2656,33 @@ static bool acquire_secondary_dpp_pipes_and_add_plane(
struct dc_state *cur_ctx,
struct resource_pool *pool)
{
- struct pipe_ctx *opp_head_pipe, *sec_pipe, *tail_pipe;
+ struct pipe_ctx *sec_pipe, *tail_pipe;
+ struct pipe_ctx *opp_heads[MAX_PIPES];
+ int opp_head_count;
+ int i;
if (!pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe) {
ASSERT(0);
return false;
}
- opp_head_pipe = otg_master_pipe;
- while (opp_head_pipe) {
+ opp_head_count = resource_get_opp_heads_for_otg_master(otg_master_pipe,
+ &new_ctx->res_ctx, opp_heads);
+ if (get_num_of_free_pipes(pool, new_ctx) < opp_head_count)
+ /* not enough free pipes */
+ return false;
+
+ for (i = 0; i < opp_head_count; i++) {
sec_pipe = pool->funcs->acquire_free_pipe_as_secondary_dpp_pipe(
cur_ctx,
new_ctx,
pool,
- opp_head_pipe);
- if (!sec_pipe) {
- /* try tearing down MPCC combine */
- int pipe_idx = acquire_first_split_pipe(
- &new_ctx->res_ctx, pool,
- otg_master_pipe->stream);
-
- if (pipe_idx >= 0)
- sec_pipe = &new_ctx->res_ctx.pipe_ctx[pipe_idx];
- }
-
- if (!sec_pipe)
- return false;
-
+ opp_heads[i]);
+ ASSERT(sec_pipe);
sec_pipe->plane_state = plane_state;
/* establish pipe relationship */
- tail_pipe = get_tail_pipe(opp_head_pipe);
+ tail_pipe = get_tail_pipe(opp_heads[i]);
tail_pipe->bottom_pipe = sec_pipe;
sec_pipe->top_pipe = tail_pipe;
sec_pipe->bottom_pipe = NULL;
@@ -2682,8 +2693,6 @@ static bool acquire_secondary_dpp_pipes_and_add_plane(
} else {
sec_pipe->prev_odm_pipe = NULL;
}
-
- opp_head_pipe = opp_head_pipe->next_odm_pipe;
}
return true;
}
@@ -2696,6 +2705,7 @@ bool resource_append_dpp_pipes_for_plane_composition(
struct dc_plane_state *plane_state)
{
bool success;
+
if (otg_master_pipe->plane_state == NULL)
success = add_plane_to_opp_head_pipes(otg_master_pipe,
plane_state, new_ctx);
@@ -2703,10 +2713,15 @@ bool resource_append_dpp_pipes_for_plane_composition(
success = acquire_secondary_dpp_pipes_and_add_plane(
otg_master_pipe, plane_state, new_ctx,
cur_ctx, pool);
- if (success)
+ if (success) {
/* when appending a plane mpc slice count changes from 0 to 1 */
success = update_pipe_params_after_mpc_slice_count_change(
plane_state, new_ctx, pool);
+ if (!success)
+ resource_remove_dpp_pipes_for_plane_composition(new_ctx,
+ pool, plane_state);
+ }
+
return success;
}
@@ -2716,6 +2731,7 @@ void resource_remove_dpp_pipes_for_plane_composition(
const struct dc_plane_state *plane_state)
{
int i;
+
for (i = pool->pipe_count - 1; i >= 0; i--) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 76bb05f4d6..52a1cfc5fe 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -437,6 +437,19 @@ enum dc_status dc_state_remove_stream(
return DC_OK;
}
+static void remove_mpc_combine_for_stream(const struct dc *dc,
+ struct dc_state *new_ctx,
+ const struct dc_state *cur_ctx,
+ struct dc_stream_status *status)
+{
+ int i;
+
+ for (i = 0; i < status->plane_count; i++)
+ resource_update_pipes_for_plane_with_slice_count(
+ new_ctx, cur_ctx, dc->res_pool,
+ status->plane_states[i], 1);
+}
+
bool dc_state_add_plane(
const struct dc *dc,
struct dc_stream_state *stream,
@@ -447,8 +460,12 @@ bool dc_state_add_plane(
struct pipe_ctx *otg_master_pipe;
struct dc_stream_status *stream_status = NULL;
bool added = false;
+ int odm_slice_count;
+ int i;
stream_status = dc_state_get_stream_status(state, stream);
+ otg_master_pipe = resource_get_otg_master_for_stream(
+ &state->res_ctx, stream);
if (stream_status == NULL) {
dm_error("Existing stream not found; failed to attach surface!\n");
goto out;
@@ -456,22 +473,39 @@ bool dc_state_add_plane(
dm_error("Surface: can not attach plane_state %p! Maximum is: %d\n",
plane_state, MAX_SURFACE_NUM);
goto out;
+ } else if (!otg_master_pipe) {
+ goto out;
}
- if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
- /* ODM combine could prevent us from supporting more planes
- * we will reset ODM slice count back to 1 when all planes have
- * been removed to maximize the amount of planes supported when
- * new planes are added.
- */
- resource_update_pipes_for_stream_with_slice_count(
- state, dc->current_state, dc->res_pool, stream, 1);
+ added = resource_append_dpp_pipes_for_plane_composition(state,
+ dc->current_state, pool, otg_master_pipe, plane_state);
- otg_master_pipe = resource_get_otg_master_for_stream(
- &state->res_ctx, stream);
- if (otg_master_pipe)
+ if (!added) {
+ /* try to remove MPC combine to free up pipes */
+ for (i = 0; i < state->stream_count; i++)
+ remove_mpc_combine_for_stream(dc, state,
+ dc->current_state,
+ &state->stream_status[i]);
added = resource_append_dpp_pipes_for_plane_composition(state,
- dc->current_state, pool, otg_master_pipe, plane_state);
+ dc->current_state, pool,
+ otg_master_pipe, plane_state);
+ }
+
+ if (!added) {
+ /* try to decrease ODM slice count gradually to free up pipes */
+ odm_slice_count = resource_get_odm_slice_count(otg_master_pipe);
+ for (i = odm_slice_count - 1; i > 0; i--) {
+ resource_update_pipes_for_stream_with_slice_count(state,
+ dc->current_state, dc->res_pool, stream,
+ i);
+ added = resource_append_dpp_pipes_for_plane_composition(
+ state,
+ dc->current_state, pool,
+ otg_master_pipe, plane_state);
+ if (added)
+ break;
+ }
+ }
if (added) {
stream_status->plane_states[stream_status->plane_count] =
@@ -531,15 +565,6 @@ bool dc_state_remove_plane(
stream_status->plane_states[stream_status->plane_count] = NULL;
- if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
- /* ODM combine could prevent us from supporting more planes
- * we will reset ODM slice count back to 1 when all planes have
- * been removed to maximize the amount of planes supported when
- * new planes are added.
- */
- resource_update_pipes_for_stream_with_slice_count(
- state, dc->current_state, dc->res_pool, stream, 1);
-
return true;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
index 5c7e4884ca..53bc991b6e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
@@ -266,7 +266,6 @@ bool dc_stream_set_cursor_attributes(
const struct dc_cursor_attributes *attributes)
{
struct dc *dc;
- bool reset_idle_optimizations = false;
if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
@@ -297,20 +296,36 @@ bool dc_stream_set_cursor_attributes(
stream->cursor_attributes = *attributes;
- dc_z10_restore(dc);
- /* disable idle optimizations while updating cursor */
- if (dc->idle_optimizations_allowed) {
- dc_allow_idle_optimizations(dc, false);
- reset_idle_optimizations = true;
- }
+ return true;
+}
- program_cursor_attributes(dc, stream, attributes);
+bool dc_stream_program_cursor_attributes(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_attributes *attributes)
+{
+ struct dc *dc;
+ bool reset_idle_optimizations = false;
- /* re-enable idle optimizations if necessary */
- if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle)
- dc_allow_idle_optimizations(dc, true);
+ dc = stream ? stream->ctx->dc : NULL;
- return true;
+ if (dc_stream_set_cursor_attributes(stream, attributes)) {
+ dc_z10_restore(dc);
+ /* disable idle optimizations while updating cursor */
+ if (dc->idle_optimizations_allowed) {
+ dc_allow_idle_optimizations(dc, false);
+ reset_idle_optimizations = true;
+ }
+
+ program_cursor_attributes(dc, stream, attributes);
+
+ /* re-enable idle optimizations if necessary */
+ if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle)
+ dc_allow_idle_optimizations(dc, true);
+
+ return true;
+ }
+
+ return false;
}
static void program_cursor_position(
@@ -355,9 +370,6 @@ bool dc_stream_set_cursor_position(
struct dc_stream_state *stream,
const struct dc_cursor_position *position)
{
- struct dc *dc;
- bool reset_idle_optimizations = false;
-
if (NULL == stream) {
dm_error("DC: dc_stream is NULL!\n");
return false;
@@ -368,24 +380,46 @@ bool dc_stream_set_cursor_position(
return false;
}
+ stream->cursor_position = *position;
+
+ return true;
+}
+
+bool dc_stream_program_cursor_position(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_position *position)
+{
+ struct dc *dc;
+ bool reset_idle_optimizations = false;
+ const struct dc_cursor_position *old_position;
+
+ if (!stream)
+ return false;
+
+ old_position = &stream->cursor_position;
dc = stream->ctx->dc;
- dc_z10_restore(dc);
- /* disable idle optimizations if enabling cursor */
- if (dc->idle_optimizations_allowed && (!stream->cursor_position.enable || dc->debug.exit_idle_opt_for_cursor_updates)
- && position->enable) {
- dc_allow_idle_optimizations(dc, false);
- reset_idle_optimizations = true;
- }
+ if (dc_stream_set_cursor_position(stream, position)) {
+ dc_z10_restore(dc);
- stream->cursor_position = *position;
+ /* disable idle optimizations if enabling cursor */
+ if (dc->idle_optimizations_allowed &&
+ (!old_position->enable || dc->debug.exit_idle_opt_for_cursor_updates) &&
+ position->enable) {
+ dc_allow_idle_optimizations(dc, false);
+ reset_idle_optimizations = true;
+ }
- program_cursor_position(dc, stream, position);
- /* re-enable idle optimizations if necessary */
- if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle)
- dc_allow_idle_optimizations(dc, true);
+ program_cursor_position(dc, stream, position);
+ /* re-enable idle optimizations if necessary */
+ if (reset_idle_optimizations && !dc->debug.disable_dmub_reallow_idle)
+ dc_allow_idle_optimizations(dc, true);
- return true;
+ return true;
+ }
+
+ return false;
}
bool dc_stream_add_writeback(struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index e5dbbc6089..1039dfb0b0 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -470,10 +470,18 @@ bool dc_stream_set_cursor_attributes(
struct dc_stream_state *stream,
const struct dc_cursor_attributes *attributes);
+bool dc_stream_program_cursor_attributes(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_attributes *attributes);
+
bool dc_stream_set_cursor_position(
struct dc_stream_state *stream,
const struct dc_cursor_position *position);
+bool dc_stream_program_cursor_position(
+ struct dc_stream_state *stream,
+ const struct dc_cursor_position *position);
+
bool dc_stream_adjust_vmin_vmax(struct dc *dc,
struct dc_stream_state *stream,
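The dc_stream.c and dc_stream.h hunks above split each cursor entry point in two: dc_stream_set_cursor_* now only validates and caches state, while the new dc_stream_program_cursor_* wraps the setter and additionally restores Z10, toggles idle optimizations and programs the hardware. Callers that merely stage state keep the setter; commit paths (amdgpu_dm.c and amdgpu_dm_plane.c above, dcn30_hwseq.c below) switch to the program variant. The shape of the split, heavily reduced (hypothetical names):

    struct cursor { int x, y; };
    struct stream { struct cursor cursor; };

    /* hypothetical stand-ins for dc_z10_restore() and idle-optimization handling */
    void hw_exit_idle(struct stream *s);
    void hw_allow_idle(struct stream *s);
    void hw_program_cursor(struct stream *s, const struct cursor *c);

    bool set_cursor(struct stream *s, const struct cursor *c)
    {
            if (!s || !c)
                    return false;
            s->cursor = *c;                  /* cache state only; no HW access */
            return true;
    }

    bool program_cursor(struct stream *s, const struct cursor *c)
    {
            if (!set_cursor(s, c))           /* reuse the validation + caching */
                    return false;
            hw_exit_idle(s);                 /* HW work lives only in this variant */
            hw_program_cursor(s, c);
            hw_allow_idle(s);
            return true;
    }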
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
index 4f559a025c..09cf54586f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_replay.c
@@ -84,7 +84,7 @@ static void dmub_replay_enable(struct dmub_replay *dmub, bool enable, bool wait,
cmd.replay_enable.header.payload_bytes = sizeof(struct dmub_rb_cmd_replay_enable_data);
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
/* Below loops 1000 x 500us = 500 ms.
* Exit REPLAY may need to wait 1-2 frames to power up. Timeout after at
@@ -127,7 +127,7 @@ static void dmub_replay_set_power_opt(struct dmub_replay *dmub, unsigned int pow
cmd.replay_set_power_opt.replay_set_power_opt_data.power_opt = power_opt;
cmd.replay_set_power_opt.replay_set_power_opt_data.panel_inst = panel_inst;
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
/*
@@ -209,8 +209,7 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
else
copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0;
-
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
return true;
}
@@ -231,7 +230,7 @@ static void dmub_replay_set_coasting_vtotal(struct dmub_replay *dmub,
cmd.replay_set_coasting_vtotal.header.payload_bytes = sizeof(struct dmub_cmd_replay_set_coasting_vtotal_data);
cmd.replay_set_coasting_vtotal.replay_set_coasting_vtotal_data.coasting_vtotal = coasting_vtotal;
- dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+ dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
}
/*
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
index 0c4aef8ffe..3306684e80 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn10/dcn10_hwseq.c
@@ -288,6 +288,7 @@ static void dcn10_log_color_state(struct dc *dc,
{
struct dc_context *dc_ctx = dc->ctx;
struct resource_pool *pool = dc->res_pool;
+ bool is_gamut_remap_available = false;
int i;
DTN_INFO("DPP: IGAM format IGAM mode DGAM mode RGAM mode"
@@ -300,16 +301,15 @@ static void dcn10_log_color_state(struct dc *dc,
struct dcn_dpp_state s = {0};
dpp->funcs->dpp_read_state(dpp, &s);
- dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+ if (dpp->funcs->dpp_get_gamut_remap) {
+ dpp->funcs->dpp_get_gamut_remap(dpp, &s.gamut_remap);
+ is_gamut_remap_available = true;
+ }
if (!s.is_enabled)
continue;
- DTN_INFO("[%2d]: %11xh %11s %9s %9s"
- " %12s "
- "%010lld %010lld %010lld %010lld "
- "%010lld %010lld %010lld %010lld "
- "%010lld %010lld %010lld %010lld",
+ DTN_INFO("[%2d]: %11xh %11s %9s %9s",
dpp->inst,
s.igam_input_format,
(s.igam_lut_mode == 0) ? "BypassFixed" :
@@ -328,22 +328,27 @@ static void dcn10_log_color_state(struct dc *dc,
((s.rgam_lut_mode == 2) ? "Ycc" :
((s.rgam_lut_mode == 3) ? "RAM" :
((s.rgam_lut_mode == 4) ? "RAM" :
- "Unknown")))),
- (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
- ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" :
- "SW"),
- s.gamut_remap.temperature_matrix[0].value,
- s.gamut_remap.temperature_matrix[1].value,
- s.gamut_remap.temperature_matrix[2].value,
- s.gamut_remap.temperature_matrix[3].value,
- s.gamut_remap.temperature_matrix[4].value,
- s.gamut_remap.temperature_matrix[5].value,
- s.gamut_remap.temperature_matrix[6].value,
- s.gamut_remap.temperature_matrix[7].value,
- s.gamut_remap.temperature_matrix[8].value,
- s.gamut_remap.temperature_matrix[9].value,
- s.gamut_remap.temperature_matrix[10].value,
- s.gamut_remap.temperature_matrix[11].value);
+ "Unknown")))));
+ if (is_gamut_remap_available)
+ DTN_INFO(" %12s "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld "
+ "%010lld %010lld %010lld %010lld",
+ (s.gamut_remap.gamut_adjust_type == 0) ? "Bypass" :
+ ((s.gamut_remap.gamut_adjust_type == 1) ? "HW" : "SW"),
+ s.gamut_remap.temperature_matrix[0].value,
+ s.gamut_remap.temperature_matrix[1].value,
+ s.gamut_remap.temperature_matrix[2].value,
+ s.gamut_remap.temperature_matrix[3].value,
+ s.gamut_remap.temperature_matrix[4].value,
+ s.gamut_remap.temperature_matrix[5].value,
+ s.gamut_remap.temperature_matrix[6].value,
+ s.gamut_remap.temperature_matrix[7].value,
+ s.gamut_remap.temperature_matrix[8].value,
+ s.gamut_remap.temperature_matrix[9].value,
+ s.gamut_remap.temperature_matrix[10].value,
+ s.gamut_remap.temperature_matrix[11].value);
+
DTN_INFO("\n");
}
DTN_INFO("\n");
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index ed9141a67d..4c47061533 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -919,6 +919,9 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
stream = dc->current_state->streams[0];
plane = (stream ? dc->current_state->stream_status[0].plane_states[0] : NULL);
+ if (!stream || !plane)
+ return false;
+
if (stream && plane) {
cursor_cache_enable = stream->cursor_position.enable &&
plane->address.grph.cursor_cache_addr.quad_part;
@@ -1038,7 +1041,7 @@ bool dcn30_apply_idle_power_optimizations(struct dc *dc, bool enable)
/* Use copied cursor, and it's okay to not switch back */
cursor_attr.address.quad_part = cmd.mall.cursor_copy_dst.quad_part;
- dc_stream_set_cursor_attributes(stream, &cursor_attr);
+ dc_stream_program_cursor_attributes(stream, &cursor_attr);
}
/* Enable MALL */
diff --git a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
index 3e6c7be7e2..5302d2c9c7 100644
--- a/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/link/hwss/link_hwss_hpo_fixed_vs_pe_retimer_dp.c
@@ -165,7 +165,12 @@ static void set_hpo_fixed_vs_pe_retimer_dp_link_test_pattern(struct dc_link *lin
link_res->hpo_dp_link_enc->funcs->set_link_test_pattern(
link_res->hpo_dp_link_enc, tp_params);
}
+
link->dc->link_srv->dp_trace_source_sequence(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN);
+
+ // Give retimer extra time to lock before updating DP_TRAINING_PATTERN_SET to TPS1
+ if (tp_params->dp_phy_pattern == DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE)
+ msleep(30);
}
static void set_hpo_fixed_vs_pe_retimer_dp_lane_settings(struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
index b53ad18dbf..ec9ff5f8bd 100644
--- a/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
+++ b/drivers/gpu/drm/amd/display/dc/link/link_dpms.c
@@ -2313,8 +2313,6 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
dc->hwss.disable_audio_stream(pipe_ctx);
- edp_set_panel_assr(link, pipe_ctx, &panel_mode_dp, false);
-
update_psp_stream_config(pipe_ctx, true);
dc->hwss.blank_stream(pipe_ctx);
@@ -2368,6 +2366,7 @@ void link_set_dpms_off(struct pipe_ctx *pipe_ctx)
dc->hwss.disable_stream(pipe_ctx);
disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal);
}
+ edp_set_panel_assr(link, pipe_ctx, &panel_mode_dp, false);
if (pipe_ctx->stream->timing.flags.DSC) {
if (dc_is_dp_signal(pipe_ctx->stream->signal))
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 0fcf0b8530..564246983f 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -454,7 +454,8 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
* If we got sink count changed it means
* Downstream port status changed,
* then DM should call DC to do the detection.
- * NOTE: Do not handle link loss on eDP since it is internal link*/
+ * NOTE: Do not handle link loss on eDP since it is internal link
+ */
if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
dp_parse_link_loss_status(
link,
diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
index 0a939437e1..6b380e037e 100644
--- a/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/resource/dcn20/dcn20_resource.c
@@ -2193,10 +2193,11 @@ bool dcn20_get_dcc_compression_cap(const struct dc *dc,
const struct dc_dcc_surface_param *input,
struct dc_surface_dcc_cap *output)
{
- return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
- dc->res_pool->hubbub,
- input,
- output);
+ if (dc->res_pool->hubbub->funcs->get_dcc_compression_cap)
+ return dc->res_pool->hubbub->funcs->get_dcc_compression_cap(
+ dc->res_pool->hubbub, input, output);
+
+ return false;
}
static void dcn20_destroy_resource_pool(struct resource_pool **pool)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 5fb21a0508..f531ce1d2b 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -929,7 +929,7 @@ static int pp_dpm_switch_power_profile(void *handle,
enum PP_SMC_POWER_PROFILE type, bool en)
{
struct pp_hwmgr *hwmgr = handle;
- long workload;
+ long workload[1];
uint32_t index;
if (!hwmgr || !hwmgr->pm_en)
@@ -947,12 +947,12 @@ static int pp_dpm_switch_power_profile(void *handle,
hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
index = fls(hwmgr->workload_mask);
index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
- workload = hwmgr->workload_setting[index];
+ workload[0] = hwmgr->workload_setting[index];
} else {
hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
index = fls(hwmgr->workload_mask);
index = index <= Workload_Policy_Max ? index - 1 : 0;
- workload = hwmgr->workload_setting[index];
+ workload[0] = hwmgr->workload_setting[index];
}
if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
@@ -962,7 +962,7 @@ static int pp_dpm_switch_power_profile(void *handle,
}
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
- hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
+ hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);
return 0;
}
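The same scalar-to-array change repeats below in pp_psm.c and amdgpu_smu.c: set_power_profile_mode() and smu_bump_power_profile_mode() take a long pointer plus a size, and their implementations index into that buffer, so passing &workload for a lone long left the single-element bound invisible. Declaring the caller's storage as long workload[1] makes it explicit. Reduced to essentials (hypothetical callee and helpers):

    void apply_mode(long mode);
    long pick_setting(void);

    static void set_profile_mode(long *input, uint32_t size)
    {
            apply_mode(input[size]);         /* callee indexes the buffer; size == 0 reads input[0] */
    }

    void switch_profile(void)
    {
            long workload[1];                /* was: long workload; passed as &workload */

            workload[0] = pick_setting();
            set_profile_mode(workload, 0);   /* the one-element bound is now visible to the compiler */
    }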
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
index 1d829402cd..f4bd8e9357 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/pp_psm.c
@@ -269,7 +269,7 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
struct pp_power_state *new_ps)
{
uint32_t index;
- long workload;
+ long workload[1];
if (hwmgr->not_vf) {
if (!skip_display_settings)
@@ -294,10 +294,10 @@ int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip_display_set
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
index = fls(hwmgr->workload_mask);
index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
- workload = hwmgr->workload_setting[index];
+ workload[0] = hwmgr->workload_setting[index];
- if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode)
- hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
+ if (hwmgr->power_profile_mode != workload[0] && hwmgr->hwmgr_func->set_power_profile_mode)
+ hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, workload, 0);
}
return 0;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index 1fcd445100..f1c369945a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -2957,6 +2957,7 @@ static int smu7_update_edc_leakage_table(struct pp_hwmgr *hwmgr)
static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = hwmgr->adev;
struct smu7_hwmgr *data;
int result = 0;
@@ -2993,40 +2994,37 @@ static int smu7_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
/* Initialize Dynamic State Adjustment Rule Settings */
result = phm_initializa_dynamic_state_adjustment_rule_settings(hwmgr);
- if (0 == result) {
- struct amdgpu_device *adev = hwmgr->adev;
+ if (result)
+ goto fail;
- data->is_tlu_enabled = false;
+ data->is_tlu_enabled = false;
- hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
+ hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
SMU7_MAX_HARDWARE_POWERLEVELS;
- hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
- hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
+ hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
+ hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;
- data->pcie_gen_cap = adev->pm.pcie_gen_mask;
- if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
- data->pcie_spc_cap = 20;
- else
- data->pcie_spc_cap = 16;
- data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
-
- hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
-/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
- hwmgr->platform_descriptor.clockStep.engineClock = 500;
- hwmgr->platform_descriptor.clockStep.memoryClock = 500;
- smu7_thermal_parameter_init(hwmgr);
- } else {
- /* Ignore return value in here, we are cleaning up a mess. */
- smu7_hwmgr_backend_fini(hwmgr);
- }
+ data->pcie_gen_cap = adev->pm.pcie_gen_mask;
+ if (data->pcie_gen_cap & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
+ data->pcie_spc_cap = 20;
+ else
+ data->pcie_spc_cap = 16;
+ data->pcie_lane_cap = adev->pm.pcie_mlw_mask;
+
+ hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
+ /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
+ hwmgr->platform_descriptor.clockStep.engineClock = 500;
+ hwmgr->platform_descriptor.clockStep.memoryClock = 500;
+ smu7_thermal_parameter_init(hwmgr);
result = smu7_update_edc_leakage_table(hwmgr);
- if (result) {
- smu7_hwmgr_backend_fini(hwmgr);
- return result;
- }
+ if (result)
+ goto fail;
return 0;
+fail:
+ smu7_hwmgr_backend_fini(hwmgr);
+ return result;
}
static int smu7_force_dpm_highest(struct pp_hwmgr *hwmgr)
@@ -3316,8 +3314,7 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
const struct pp_power_state *current_ps)
{
struct amdgpu_device *adev = hwmgr->adev;
- struct smu7_power_state *smu7_ps =
- cast_phw_smu7_power_state(&request_ps->hardware);
+ struct smu7_power_state *smu7_ps;
uint32_t sclk;
uint32_t mclk;
struct PP_Clocks minimum_clocks = {0};
@@ -3334,6 +3331,10 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
uint32_t latency;
bool latency_allowed = false;
+ smu7_ps = cast_phw_smu7_power_state(&request_ps->hardware);
+ if (!smu7_ps)
+ return -EINVAL;
+
data->battery_state = (PP_StateUILabel_Battery ==
request_ps->classification.ui_label);
data->mclk_ignore_signal = false;
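The smu7_hwmgr_backend_init() rewrite above trades a success-path if/else pyramid (with the cleanup call duplicated in two places) for the kernel's usual centralized-unwind layout: each step checks its result and jumps to a single fail label, and the happy path runs straight through to return 0. In outline (hypothetical step functions):

    struct hwmgr;
    int init_rule_settings(struct hwmgr *h);
    void configure_descriptors(struct hwmgr *h);
    int update_edc_table(struct hwmgr *h);
    void backend_fini(struct hwmgr *h);

    int backend_init(struct hwmgr *h)
    {
            int ret;

            ret = init_rule_settings(h);
            if (ret)
                    goto fail;

            configure_descriptors(h);        /* straight-line setup, no nesting */

            ret = update_edc_table(h);
            if (ret)
                    goto fail;

            return 0;
    fail:
            backend_fini(h);                 /* one cleanup point for every error path */
            return ret;
    }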
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index b015a601b3..eb744401e0 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -1065,16 +1065,18 @@ static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
struct pp_power_state *prequest_ps,
const struct pp_power_state *pcurrent_ps)
{
- struct smu8_power_state *smu8_ps =
- cast_smu8_power_state(&prequest_ps->hardware);
-
- const struct smu8_power_state *smu8_current_ps =
- cast_const_smu8_power_state(&pcurrent_ps->hardware);
-
+ struct smu8_power_state *smu8_ps;
+ const struct smu8_power_state *smu8_current_ps;
struct smu8_hwmgr *data = hwmgr->backend;
struct PP_Clocks clocks = {0, 0, 0, 0};
bool force_high;
+ smu8_ps = cast_smu8_power_state(&prequest_ps->hardware);
+ smu8_current_ps = cast_const_smu8_power_state(&pcurrent_ps->hardware);
+
+ if (!smu8_ps || !smu8_current_ps)
+ return -EINVAL;
+
smu8_ps->need_dfs_bypass = true;
data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 9f5bd998c6..f4acdb2267 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -3259,8 +3259,7 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
const struct pp_power_state *current_ps)
{
struct amdgpu_device *adev = hwmgr->adev;
- struct vega10_power_state *vega10_ps =
- cast_phw_vega10_power_state(&request_ps->hardware);
+ struct vega10_power_state *vega10_ps;
uint32_t sclk;
uint32_t mclk;
struct PP_Clocks minimum_clocks = {0};
@@ -3278,6 +3277,10 @@ static int vega10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
uint32_t stable_pstate_sclk = 0, stable_pstate_mclk = 0;
uint32_t latency;
+ vega10_ps = cast_phw_vega10_power_state(&request_ps->hardware);
+ if (!vega10_ps)
+ return -EINVAL;
+
data->battery_state = (PP_StateUILabel_Battery ==
request_ps->classification.ui_label);
@@ -3415,13 +3418,17 @@ static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr *hwmgr, co
const struct vega10_power_state *vega10_ps =
cast_const_phw_vega10_power_state(states->pnew_state);
struct vega10_single_dpm_table *sclk_table = &(data->dpm_table.gfx_table);
- uint32_t sclk = vega10_ps->performance_levels
- [vega10_ps->performance_level_count - 1].gfx_clock;
struct vega10_single_dpm_table *mclk_table = &(data->dpm_table.mem_table);
- uint32_t mclk = vega10_ps->performance_levels
- [vega10_ps->performance_level_count - 1].mem_clock;
+ uint32_t sclk, mclk;
uint32_t i;
+ if (vega10_ps == NULL)
+ return -EINVAL;
+ sclk = vega10_ps->performance_levels
+ [vega10_ps->performance_level_count - 1].gfx_clock;
+ mclk = vega10_ps->performance_levels
+ [vega10_ps->performance_level_count - 1].mem_clock;
+
for (i = 0; i < sclk_table->count; i++) {
if (sclk == sclk_table->dpm_levels[i].value)
break;
@@ -3728,6 +3735,9 @@ static int vega10_generate_dpm_level_enable_mask(
cast_const_phw_vega10_power_state(states->pnew_state);
int i;
+ if (vega10_ps == NULL)
+ return -EINVAL;
+
PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
"Attempt to Trim DPM States Failed!",
return -1);
@@ -4995,6 +5005,8 @@ static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
vega10_psa = cast_const_phw_vega10_power_state(pstate1);
vega10_psb = cast_const_phw_vega10_power_state(pstate2);
+ if (vega10_psa == NULL || vega10_psb == NULL)
+ return -EINVAL;
/* If the two states don't even have the same number of performance levels
* they cannot be the same state.
@@ -5128,6 +5140,8 @@ static int vega10_set_sclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
return -EINVAL;
vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
+ if (vega10_ps == NULL)
+ return -EINVAL;
vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].gfx_clock =
@@ -5179,6 +5193,8 @@ static int vega10_set_mclk_od(struct pp_hwmgr *hwmgr, uint32_t value)
return -EINVAL;
vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
+ if (vega10_ps == NULL)
+ return -EINVAL;
vega10_ps->performance_levels
[vega10_ps->performance_level_count - 1].mem_clock =
@@ -5420,6 +5436,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
return;
vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
+ if (vega10_ps == NULL)
+ return;
+
max_level = vega10_ps->performance_level_count - 1;
if (vega10_ps->performance_levels[max_level].gfx_clock !=
@@ -5442,6 +5461,9 @@ static void vega10_odn_update_power_state(struct pp_hwmgr *hwmgr)
ps = (struct pp_power_state *)((unsigned long)(hwmgr->ps) + hwmgr->ps_size * (hwmgr->num_ps - 1));
vega10_ps = cast_phw_vega10_power_state(&ps->hardware);
+ if (vega10_ps == NULL)
+ return;
+
max_level = vega10_ps->performance_level_count - 1;
if (vega10_ps->performance_levels[max_level].gfx_clock !=
@@ -5632,6 +5654,8 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
return -EINVAL;
vega10_ps = cast_const_phw_vega10_power_state(state);
+ if (vega10_ps == NULL)
+ return -EINVAL;
i = index > vega10_ps->performance_level_count - 1 ?
vega10_ps->performance_level_count - 1 : index;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index e1796ecf9c..06409133b0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -2220,7 +2220,7 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
{
int ret = 0;
int index = 0;
- long workload;
+ long workload[1];
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
if (!skip_display_settings) {
@@ -2260,10 +2260,10 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) {
index = fls(smu->workload_mask);
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
- workload = smu->workload_setting[index];
+ workload[0] = smu->workload_setting[index];
- if (smu->power_profile_mode != workload)
- smu_bump_power_profile_mode(smu, &workload, 0);
+ if (smu->power_profile_mode != workload[0])
+ smu_bump_power_profile_mode(smu, workload, 0);
}
return ret;
@@ -2313,7 +2313,7 @@ static int smu_switch_power_profile(void *handle,
{
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- long workload;
+ long workload[1];
uint32_t index;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
@@ -2326,17 +2326,17 @@ static int smu_switch_power_profile(void *handle,
smu->workload_mask &= ~(1 << smu->workload_prority[type]);
index = fls(smu->workload_mask);
index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
- workload = smu->workload_setting[index];
+ workload[0] = smu->workload_setting[index];
} else {
smu->workload_mask |= (1 << smu->workload_prority[type]);
index = fls(smu->workload_mask);
index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
- workload = smu->workload_setting[index];
+ workload[0] = smu->workload_setting[index];
}
if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
- smu_bump_power_profile_mode(smu, &workload, 0);
+ smu_bump_power_profile_mode(smu, workload, 0);
return 0;
}
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
index 6a4f20fccf..7b0bc9704e 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_reg.c
@@ -1027,7 +1027,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
u32 status_reg;
u8 *buffer = msg->buffer;
unsigned int i;
- int num_transferred = 0;
int ret;
/* Buffer size of AUX CH is 16 bytes */
@@ -1079,7 +1078,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
reg = buffer[i];
writel(reg, dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
4 * i);
- num_transferred++;
}
}
@@ -1127,7 +1125,6 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
reg = readl(dp->reg_base + ANALOGIX_DP_BUF_DATA_0 +
4 * i);
buffer[i] = (unsigned char)reg;
- num_transferred++;
}
}
@@ -1144,7 +1141,7 @@ ssize_t analogix_dp_transfer(struct analogix_dp_device *dp,
(msg->request & ~DP_AUX_I2C_MOT) == DP_AUX_NATIVE_READ)
msg->reply = DP_AUX_NATIVE_REPLY_ACK;
- return num_transferred > 0 ? num_transferred : -EBUSY;
+ return msg->size;
aux_error:
/* if an aux error happens, reset aux */
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index 68831f4e50..fc2ceae61d 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -4069,6 +4069,7 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
const struct drm_dp_connection_status_notify *conn_stat =
&up_req->msg.u.conn_stat;
+ bool handle_csn;
drm_dbg_kms(mgr->dev, "Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n",
conn_stat->port_number,
@@ -4077,6 +4078,16 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
conn_stat->message_capability_status,
conn_stat->input_port,
conn_stat->peer_device_type);
+
+ mutex_lock(&mgr->probe_lock);
+ handle_csn = mgr->mst_primary->link_address_sent;
+ mutex_unlock(&mgr->probe_lock);
+
+ if (!handle_csn) {
+ drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
+ kfree(up_req);
+ goto out;
+ }
} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
const struct drm_dp_resource_status_notify *res_stat =
&up_req->msg.u.resource_stat;
diff --git a/drivers/gpu/drm/drm_atomic_uapi.c b/drivers/gpu/drm/drm_atomic_uapi.c
index 02b1235c6d..106292d6ed 100644
--- a/drivers/gpu/drm/drm_atomic_uapi.c
+++ b/drivers/gpu/drm/drm_atomic_uapi.c
@@ -1067,23 +1067,16 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
}
if (async_flip &&
- prop != config->prop_fb_id &&
- prop != config->prop_in_fence_fd &&
- prop != config->prop_fb_damage_clips) {
+ (plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY ||
+ (prop != config->prop_fb_id &&
+ prop != config->prop_in_fence_fd &&
+ prop != config->prop_fb_damage_clips))) {
ret = drm_atomic_plane_get_property(plane, plane_state,
prop, &old_val);
ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
break;
}
- if (async_flip && plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY) {
- drm_dbg_atomic(prop->dev,
- "[OBJECT:%d] Only primary planes can be changed during async flip\n",
- obj->id);
- ret = -EINVAL;
- break;
- }
-
ret = drm_atomic_plane_set_property(plane,
plane_state, file_priv,
prop, prop_value);
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 31af5cf37a..cee5eafbfb 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -880,6 +880,11 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
kfree(modeset->mode);
modeset->mode = drm_mode_duplicate(dev, mode);
+ if (!modeset->mode) {
+ ret = -ENOMEM;
+ break;
+ }
+
drm_connector_get(connector);
modeset->connectors[modeset->num_connectors++] = connector;
modeset->x = offset->x;
diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c
index 071668bfe5..6c33331367 100644
--- a/drivers/gpu/drm/i915/display/intel_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_backlight.c
@@ -1449,6 +1449,9 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
static int cnp_num_backlight_controllers(struct drm_i915_private *i915)
{
+ if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+ return 2;
+
if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
return 1;
diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c
index 0ccbf9a859..eca07436d1 100644
--- a/drivers/gpu/drm/i915/display/intel_pps.c
+++ b/drivers/gpu/drm/i915/display/intel_pps.c
@@ -351,6 +351,9 @@ static int intel_num_pps(struct drm_i915_private *i915)
if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
return 2;
+ if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+ return 2;
+
if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
return 1;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index a2195e28b6..cac6d41845 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -290,6 +290,41 @@ out:
return i915_error_to_vmf_fault(err);
}
+static void set_address_limits(struct vm_area_struct *area,
+ struct i915_vma *vma,
+ unsigned long obj_offset,
+ unsigned long *start_vaddr,
+ unsigned long *end_vaddr)
+{
+ unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
+ long start, end; /* memory boundaries */
+
+ /*
+ * Let's move into the ">> PAGE_SHIFT"
+ * domain to be sure not to lose bits
+ */
+ vm_start = area->vm_start >> PAGE_SHIFT;
+ vm_end = area->vm_end >> PAGE_SHIFT;
+ vma_size = vma->size >> PAGE_SHIFT;
+
+ /*
+ * Calculate the memory boundaries by considering the offset
+ * provided by the user during memory mapping and the offset
+ * provided for the partial mapping.
+ */
+ start = vm_start;
+ start -= obj_offset;
+ start += vma->gtt_view.partial.offset;
+ end = start + vma_size;
+
+ start = max_t(long, start, vm_start);
+ end = min_t(long, end, vm_end);
+
+ /* Let's move back into the "<< PAGE_SHIFT" domain */
+ *start_vaddr = (unsigned long)start << PAGE_SHIFT;
+ *end_vaddr = (unsigned long)end << PAGE_SHIFT;
+}
+
static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
{
#define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
@@ -302,14 +337,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
bool write = area->vm_flags & VM_WRITE;
struct i915_gem_ww_ctx ww;
+ unsigned long obj_offset;
+ unsigned long start, end; /* memory boundaries */
intel_wakeref_t wakeref;
struct i915_vma *vma;
pgoff_t page_offset;
+ unsigned long pfn;
int srcu;
int ret;
- /* We don't use vmf->pgoff since that has the fake offset */
+ obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+ page_offset += obj_offset;
trace_i915_gem_object_fault(obj, page_offset, true, write);
@@ -402,12 +441,14 @@ retry:
if (ret)
goto err_unpin;
+ set_address_limits(area, vma, obj_offset, &start, &end);
+
+ pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
+ pfn += (start - area->vm_start) >> PAGE_SHIFT;
+ pfn += obj_offset - vma->gtt_view.partial.offset;
+
/* Finally, remap it using the new GTT offset */
- ret = remap_io_mapping(area,
- area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
- (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
- min_t(u64, vma->size, area->vm_end - area->vm_start),
- &ggtt->iomap);
+ ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
if (ret)
goto err_fence;
@@ -1084,6 +1125,8 @@ int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma
mmo = mmap_offset_attach(obj, mmap_type, NULL);
if (IS_ERR(mmo))
return PTR_ERR(mmo);
+
+ vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node);
}
/*
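
The boundary arithmetic in set_address_limits() is easiest to check with concrete numbers. Below is a self-contained sketch that mirrors the clamp logic with hypothetical values (PAGE_SHIFT hardcoded to 12; none of these numbers come from the driver). Note the signed long intermediates: start may legitimately go negative before the max_t() clamp when the partial view begins before the user's window.

	#include <assert.h>
	#include <stdio.h>

	#define PAGE_SHIFT 12

	int main(void)
	{
		/* Hypothetical 16-page user mapping, 8 pages into the object. */
		long vm_start = 0x7f0000000000L >> PAGE_SHIFT;
		long vm_end = vm_start + 16;
		long obj_offset = 8;		/* vm_pgoff minus the vma-node start */
		long partial_offset = 4;	/* page where the partial view begins */
		long vma_size = 32;		/* pages covered by the pinned view */

		/* Same arithmetic as set_address_limits(), in page units. */
		long start = vm_start - obj_offset + partial_offset;
		long end = start + vma_size;

		start = start > vm_start ? start : vm_start;	/* max_t() */
		end = end < vm_end ? end : vm_end;		/* min_t() */

		printf("remap %ld pages at page %#lx\n", end - start, start);
		assert(end - start <= vm_end - vm_start);
		return 0;
	}

Here the view covers object pages 4..35 and the user's window covers 8..23, so the whole 16-page window is remapped; shrink vma_size to 8 and the intersection, and with it the remap length, shrinks to 4 pages.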
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
index e6f177183c..5c72462d1f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c
@@ -165,7 +165,6 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
obj->mm.region, &places[0], obj->bo_offset,
obj->base.size, flags);
- places[0].flags |= TTM_PL_FLAG_DESIRED;
/* Cache this on object? */
for (i = 0; i < num_allowed; ++i) {
@@ -779,13 +778,16 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
.interruptible = true,
.no_wait_gpu = false,
};
- int real_num_busy;
+ struct ttm_placement initial_placement;
+ struct ttm_place initial_place;
int ret;
/* First try only the requested placement. No eviction. */
- real_num_busy = placement->num_placement;
- placement->num_placement = 1;
- ret = ttm_bo_validate(bo, placement, &ctx);
+ initial_placement.num_placement = 1;
+ memcpy(&initial_place, placement->placement, sizeof(struct ttm_place));
+ initial_place.flags |= TTM_PL_FLAG_DESIRED;
+ initial_placement.placement = &initial_place;
+ ret = ttm_bo_validate(bo, &initial_placement, &ctx);
if (ret) {
ret = i915_ttm_err_to_gem(ret);
/*
@@ -800,7 +802,6 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
* If the initial attempt fails, allow all accepted placements,
* evicting if necessary.
*/
- placement->num_placement = real_num_busy;
ret = ttm_bo_validate(bo, placement, &ctx);
if (ret)
return i915_ttm_err_to_gem(ret);
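
Rather than temporarily rewriting placement->num_placement (the old real_num_busy dance), the first no-eviction pass now validates against a throwaway single-slot placement, so the caller-visible list is never mutated and TTM_PL_FLAG_DESIRED is set only on the local copy. The shape of the pattern, with mock types standing in for the TTM structures:

	struct place { unsigned int flags; };
	struct placement { unsigned int num; const struct place *places; };

	/* validate() stands in for ttm_bo_validate() in this sketch. */
	static int try_placements(int (*validate)(const struct placement *),
				  const struct placement *all)
	{
		struct place first = all->places[0];	/* private copy ... */
		struct placement preferred = { 1, &first };
		int ret;

		first.flags |= 0x1;		/* ... so flagging it stays private */
		ret = validate(&preferred);	/* preferred only, no eviction */
		if (ret)
			ret = validate(all);	/* full list, may evict */
		return ret;
	}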
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 739c865b55..10bce18b7c 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -501,3 +501,4 @@ module_platform_driver(lima_platform_driver);
MODULE_AUTHOR("Lima Project Developers");
MODULE_DESCRIPTION("Lima DRM Driver");
MODULE_LICENSE("GPL v2");
+MODULE_SOFTDEP("pre: governor_simpleondemand");
diff --git a/drivers/gpu/drm/mgag200/mgag200_i2c.c b/drivers/gpu/drm/mgag200/mgag200_i2c.c
index 423eb302be..4caeb68f30 100644
--- a/drivers/gpu/drm/mgag200/mgag200_i2c.c
+++ b/drivers/gpu/drm/mgag200/mgag200_i2c.c
@@ -31,6 +31,8 @@
#include <linux/i2c.h>
#include <linux/pci.h>
+#include <drm/drm_managed.h>
+
#include "mgag200_drv.h"
static int mga_i2c_read_gpio(struct mga_device *mdev)
@@ -86,7 +88,7 @@ static int mga_gpio_getscl(void *data)
return (mga_i2c_read_gpio(mdev) & i2c->clock) ? 1 : 0;
}
-static void mgag200_i2c_release(void *res)
+static void mgag200_i2c_release(struct drm_device *dev, void *res)
{
struct mga_i2c_chan *i2c = res;
@@ -114,7 +116,7 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c)
i2c->adapter.algo_data = &i2c->bit;
i2c->bit.udelay = 10;
- i2c->bit.timeout = 2;
+ i2c->bit.timeout = usecs_to_jiffies(2200);
i2c->bit.data = i2c;
i2c->bit.setsda = mga_gpio_setsda;
i2c->bit.setscl = mga_gpio_setscl;
@@ -125,5 +127,5 @@ int mgag200_i2c_init(struct mga_device *mdev, struct mga_i2c_chan *i2c)
if (ret)
return ret;
- return devm_add_action_or_reset(dev->dev, mgag200_i2c_release, i2c);
+ return drmm_add_action_or_reset(dev, mgag200_i2c_release, i2c);
}
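
Two distinct fixes here. First, i2c-algo-bit's bit.timeout is in jiffies, so the literal 2 meant two scheduler ticks: 2 ms at HZ=1000 but 20 ms at HZ=100; usecs_to_jiffies(2200) expresses the intended ~2.2 ms independently of HZ. Second, moving from devm to drmm ties the adapter teardown to the DRM device's lifetime instead of the underlying struct device. A quick round-up model of the conversion (simplified; not the kernel's implementation):

	#include <stdio.h>

	/* Illustrative ceil(us / (1e6 / hz)); the kernel helper rounds up too. */
	static unsigned long usecs_to_jiffies_model(unsigned long us, unsigned long hz)
	{
		return (us * hz + 999999UL) / 1000000UL;
	}

	int main(void)
	{
		const unsigned long hz[] = { 100, 250, 1000 };

		for (int i = 0; i < 3; i++)
			printf("HZ=%-4lu old: 2 jiffies = %5lu us, new: %lu jiffies\n",
			       hz[i], 2 * 1000000UL / hz[i],
			       usecs_to_jiffies_model(2200, hz[i]));
		return 0;
	}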
diff --git a/drivers/gpu/drm/radeon/pptable.h b/drivers/gpu/drm/radeon/pptable.h
index b7f22597ee..969a8fb0ee 100644
--- a/drivers/gpu/drm/radeon/pptable.h
+++ b/drivers/gpu/drm/radeon/pptable.h
@@ -439,7 +439,7 @@ typedef struct _StateArray{
//how many states we have
UCHAR ucNumEntries;
- ATOM_PPLIB_STATE_V2 states[] __counted_by(ucNumEntries);
+ ATOM_PPLIB_STATE_V2 states[] /* __counted_by(ucNumEntries) */;
}StateArray;
diff --git a/drivers/gpu/drm/tests/drm_gem_shmem_test.c b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
index 91202e40cd..60c6527827 100644
--- a/drivers/gpu/drm/tests/drm_gem_shmem_test.c
+++ b/drivers/gpu/drm/tests/drm_gem_shmem_test.c
@@ -102,6 +102,17 @@ static void drm_gem_shmem_test_obj_create_private(struct kunit *test)
sg_init_one(sgt->sgl, buf, TEST_SIZE);
+ /*
+ * Set the DMA mask to 64 bits and map the sgtable;
+ * otherwise drm_gem_shmem_free() triggers a warning
+ * on debug kernels.
+ */
+ ret = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(64));
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
+ ret = dma_map_sgtable(drm_dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+ KUNIT_ASSERT_EQ(test, ret, 0);
+
/* Init a mock DMA-BUF */
buf_mock.size = TEST_SIZE;
attach_mock.dmabuf = &buf_mock;
diff --git a/drivers/gpu/drm/xe/regs/xe_engine_regs.h b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
index af71b87d80..03c6d4d50a 100644
--- a/drivers/gpu/drm/xe/regs/xe_engine_regs.h
+++ b/drivers/gpu/drm/xe/regs/xe_engine_regs.h
@@ -44,9 +44,10 @@
#define GSCCS_RING_BASE 0x11a000
#define RING_TAIL(base) XE_REG((base) + 0x30)
+#define TAIL_ADDR REG_GENMASK(20, 3)
#define RING_HEAD(base) XE_REG((base) + 0x34)
-#define HEAD_ADDR 0x001FFFFC
+#define HEAD_ADDR REG_GENMASK(20, 2)
#define RING_START(base) XE_REG((base) + 0x38)
@@ -135,7 +136,6 @@
#define RING_VALID_MASK 0x00000001
#define RING_VALID 0x00000001
#define STOP_RING REG_BIT(8)
-#define TAIL_ADDR 0x001FFFF8
#define RING_CTX_TIMESTAMP(base) XE_REG((base) + 0x3a8)
#define CSBE_DEBUG_STATUS(base) XE_REG((base) + 0x3fc)
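
The REG_GENMASK() forms are bit-for-bit the old magic numbers (and TAIL_ADDR moves up next to RING_TAIL, the register it masks). That equivalence is easy to machine-check with a local model of the macro, mirroring what include/linux/bits.h does, here for a 32-bit mask:

	/* Local model of REG_GENMASK(h, l): bits l..h set in a u32. */
	#define GENMASK_U32(h, l) ((~0u >> (31 - (h))) & (~0u << (l)))

	_Static_assert(GENMASK_U32(20, 2) == 0x001FFFFCu, "HEAD_ADDR");
	_Static_assert(GENMASK_U32(20, 3) == 0x001FFFF8u, "TAIL_ADDR");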
diff --git a/drivers/gpu/drm/xe/xe_guc_submit.c b/drivers/gpu/drm/xe/xe_guc_submit.c
index e4e3658e6a..0f42971ff0 100644
--- a/drivers/gpu/drm/xe/xe_guc_submit.c
+++ b/drivers/gpu/drm/xe/xe_guc_submit.c
@@ -1429,8 +1429,8 @@ static void guc_exec_queue_stop(struct xe_guc *guc, struct xe_exec_queue *q)
!xe_sched_job_completed(job)) ||
xe_sched_invalidate_job(job, 2)) {
trace_xe_sched_job_ban(job);
- xe_sched_tdr_queue_imm(&q->guc->sched);
set_exec_queue_banned(q);
+ xe_sched_tdr_queue_imm(&q->guc->sched);
}
}
}
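
The two statements are swapped because xe_sched_tdr_queue_imm() can cause the timeout handler to run immediately; the banned flag must already be visible when that happens, or the handler briefly observes the queue in its pre-ban state. The generic shape of the rule, publish state before kicking its consumer (stand-ins, not the xe API):

	#include <stdatomic.h>
	#include <stdbool.h>

	struct exec_queue { atomic_bool banned; };

	/* Stand-in for xe_sched_tdr_queue_imm(); may run the handler at once. */
	static void tdr_queue_imm(struct exec_queue *q) { (void)q; }

	static void ban_queue(struct exec_queue *q)
	{
		atomic_store(&q->banned, true);	/* publish first ... */
		tdr_queue_imm(q);		/* ... then let the TDR observe it */
	}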
diff --git a/drivers/gpu/drm/xe/xe_hwmon.c b/drivers/gpu/drm/xe/xe_hwmon.c
index 453e601ddd..d37f1dea9f 100644
--- a/drivers/gpu/drm/xe/xe_hwmon.c
+++ b/drivers/gpu/drm/xe/xe_hwmon.c
@@ -200,9 +200,10 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long va
PKG_PWR_LIM_1_EN, 0, channel);
if (reg_val & PKG_PWR_LIM_1_EN) {
+ drm_warn(&gt_to_xe(hwmon->gt)->drm, "PL1 disable is not supported!\n");
ret = -EOPNOTSUPP;
- goto unlock;
}
+ goto unlock;
}
/* Computation in 64-bits to avoid overflow. Round to nearest. */
diff --git a/drivers/gpu/drm/xe/xe_lrc.c b/drivers/gpu/drm/xe/xe_lrc.c
index 615bbc372a..d7bf7bc9dc 100644
--- a/drivers/gpu/drm/xe/xe_lrc.c
+++ b/drivers/gpu/drm/xe/xe_lrc.c
@@ -1354,7 +1354,10 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
if (!snapshot)
return NULL;
- snapshot->context_desc = lower_32_bits(xe_lrc_ggtt_addr(lrc));
+ if (lrc->bo && lrc->bo->vm)
+ xe_vm_get(lrc->bo->vm);
+
+ snapshot->context_desc = xe_lrc_ggtt_addr(lrc);
snapshot->head = xe_lrc_ring_head(lrc);
snapshot->tail.internal = lrc->ring.tail;
snapshot->tail.memory = xe_lrc_read_ctx_reg(lrc, CTX_RING_TAIL);
@@ -1370,12 +1373,14 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
{
struct xe_bo *bo;
+ struct xe_vm *vm;
struct iosys_map src;
if (!snapshot)
return;
bo = snapshot->lrc_bo;
+ vm = bo->vm;
snapshot->lrc_bo = NULL;
snapshot->lrc_snapshot = kvmalloc(snapshot->lrc_size, GFP_KERNEL);
@@ -1395,6 +1400,8 @@ void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
dma_resv_unlock(bo->ttm.base.resv);
put_bo:
xe_bo_put(bo);
+ if (vm)
+ xe_vm_put(vm);
}
void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p)
@@ -1440,7 +1447,13 @@ void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot)
return;
kvfree(snapshot->lrc_snapshot);
- if (snapshot->lrc_bo)
+ if (snapshot->lrc_bo) {
+ struct xe_vm *vm;
+
+ vm = snapshot->lrc_bo->vm;
xe_bo_put(snapshot->lrc_bo);
+ if (vm)
+ xe_vm_put(vm);
+ }
kfree(snapshot);
}
diff --git a/drivers/gpu/drm/xe/xe_preempt_fence.c b/drivers/gpu/drm/xe/xe_preempt_fence.c
index 7d50c6e89d..5b243b7feb 100644
--- a/drivers/gpu/drm/xe/xe_preempt_fence.c
+++ b/drivers/gpu/drm/xe/xe_preempt_fence.c
@@ -23,11 +23,19 @@ static void preempt_fence_work_func(struct work_struct *w)
q->ops->suspend_wait(q);
dma_fence_signal(&pfence->base);
- dma_fence_end_signalling(cookie);
-
+ /*
+ * Opt to keep everything in the fence critical section. This looks odd, since we
+ * have just signalled the fence, but the preempt fences are all signalled via a single
+ * global ordered-wq, so anything that happens in this callback can easily block
+ * progress on the entire wq and thereby prevent other published preempt fences from
+ * ever signalling. For example, if something below grabs a scary lock like vm->lock,
+ * lockdep should complain, since we also hold that lock whilst waiting on preempt
+ * fences to complete.
+ */
xe_vm_queue_rebind_worker(q->vm);
-
xe_exec_queue_put(q);
+ dma_fence_end_signalling(cookie);
}
static const char *
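
For context, dma_fence_begin_signalling()/dma_fence_end_signalling() are lockdep annotations: code between them is treated as fence-signalling critical, and lockdep reports any lock taken there that is elsewhere held across a fence wait. The hunk widens the annotated window to match the comment. The kind of inversion it is meant to catch, roughly (fragment for illustration, not a standalone program):

	/* Thread A, elsewhere: waits on a preempt fence with vm->lock held. */
	mutex_lock(&vm->lock);
	dma_fence_wait(&pfence->base, false);

	/* Thread B, this callback: annotated as signalling-critical. */
	cookie = dma_fence_begin_signalling();
	mutex_lock(&vm->lock);	/* lockdep: lock -> fence -> lock cycle */
	dma_fence_end_signalling(cookie);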
diff --git a/drivers/gpu/drm/xe/xe_rtp.c b/drivers/gpu/drm/xe/xe_rtp.c
index fb44cc7521..10326bd1bf 100644
--- a/drivers/gpu/drm/xe/xe_rtp.c
+++ b/drivers/gpu/drm/xe/xe_rtp.c
@@ -200,7 +200,7 @@ static void rtp_mark_active(struct xe_device *xe,
if (first == last)
bitmap_set(ctx->active_entries, first, 1);
else
- bitmap_set(ctx->active_entries, first, last - first + 2);
+ bitmap_set(ctx->active_entries, first, last - first + 1);
}
/**
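
bitmap_set(map, start, nbits) takes a bit count, not an end index, and an inclusive range [first, last] contains last - first + 1 bits, so the old + 2 always set one stray bit past last (the fix also makes the first == last special case above merely redundant rather than load-bearing). A tiny check with a one-word bitmap model:

	#include <assert.h>

	/* One-word model of bitmap_set() for illustration. */
	static void bitmap_set1(unsigned long *map, unsigned int start,
				unsigned int nbits)
	{
		*map |= ((1UL << nbits) - 1) << start;
	}

	int main(void)
	{
		unsigned long map = 0, first = 4, last = 6;

		bitmap_set1(&map, first, last - first + 1);
		assert(map == 0x70);	/* bits 4..6 only; "+ 2" would add bit 7 */
		return 0;
	}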
diff --git a/drivers/gpu/drm/xe/xe_sync.c b/drivers/gpu/drm/xe/xe_sync.c
index 65f1f16282..2bfff99845 100644
--- a/drivers/gpu/drm/xe/xe_sync.c
+++ b/drivers/gpu/drm/xe/xe_sync.c
@@ -263,7 +263,7 @@ void xe_sync_entry_cleanup(struct xe_sync_entry *sync)
if (sync->fence)
dma_fence_put(sync->fence);
if (sync->chain_fence)
- dma_fence_put(&sync->chain_fence->base);
+ dma_fence_chain_free(sync->chain_fence);
if (sync->ufence)
user_fence_put(sync->ufence);
}
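
The distinction matters because sync->chain_fence is allocated ahead of time and only sometimes initialized with dma_fence_chain_init(); dma_fence_put(&chain->base) is only legal once the fence has a live refcount. dma_fence_chain_free() copes with both states. Rough pairing, as I understand the dma-fence-chain API:

	struct dma_fence_chain *chain = dma_fence_chain_alloc();

	if (!chain)
		return -ENOMEM;
	/*
	 * If dma_fence_chain_init() never runs, chain->base holds no live
	 * kref, so dma_fence_put(&chain->base) would be bogus;
	 * dma_fence_chain_free() frees either state correctly.
	 */
	dma_fence_chain_free(chain);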