author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 10:10:17 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 10:10:17 +0000
commit    9db5397d87fc212863d4750c85ff8b9a6ed42b95 (patch)
tree      1bd4e8854203c575eabaa99e2c509f8886065733 /drivers/gpu
parent    Adding upstream version 6.1.76. (diff)
Adding upstream version 6.1.82. (upstream/6.1.82)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/aldebaran.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cz_ih.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/iceland_ih.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/ih_v6_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/tonga_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega10_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_ih.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 42
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 24
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/Makefile | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c | 33
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/modules/power/power_helpers.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 29
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.c | 51
-rw-r--r--  drivers/gpu/drm/bridge/analogix/anx7625.h | 4
-rw-r--r--  drivers/gpu/drm/drm_buddy.c | 10
-rw-r--r--  drivers/gpu/drm/drm_file.c | 2
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c | 2
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 17
-rw-r--r--  drivers/gpu/drm/drm_prime.c | 2
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 6
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 3
-rw-r--r--  drivers/gpu/drm/meson/meson_drv.c | 23
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_cvbs.c | 1
-rw-r--r--  drivers/gpu/drm/meson/meson_encoder_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 9
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c | 3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 1
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_ctrl.c | 5
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_link.c | 22
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_reg.h | 3
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c | 4
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c | 32
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_svm.c | 2
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c | 8
-rw-r--r--  drivers/gpu/drm/panel/panel-edp.c | 48
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 23
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.c | 1
73 files changed, 536 insertions, 159 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
index 2b97b8a96..fa6193535 100644
--- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c
+++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c
@@ -333,6 +333,7 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
{
struct list_head *reset_device_list = reset_context->reset_device_list;
struct amdgpu_device *tmp_adev = NULL;
+ struct amdgpu_ras *con;
int r;
if (reset_device_list == NULL)
@@ -358,7 +359,30 @@ aldebaran_mode2_restore_hwcontext(struct amdgpu_reset_control *reset_ctl,
*/
amdgpu_register_gpu_instance(tmp_adev);
- /* Resume RAS */
+ /* Resume RAS, ecc_irq */
+ con = amdgpu_ras_get_context(tmp_adev);
+ if (!amdgpu_sriov_vf(tmp_adev) && con) {
+ if (tmp_adev->sdma.ras &&
+ tmp_adev->sdma.ras->ras_block.ras_late_init) {
+ r = tmp_adev->sdma.ras->ras_block.ras_late_init(tmp_adev,
+ &tmp_adev->sdma.ras->ras_block.ras_comm);
+ if (r) {
+ dev_err(tmp_adev->dev, "SDMA failed to execute ras_late_init! ret:%d\n", r);
+ goto end;
+ }
+ }
+
+ if (tmp_adev->gfx.ras &&
+ tmp_adev->gfx.ras->ras_block.ras_late_init) {
+ r = tmp_adev->gfx.ras->ras_block.ras_late_init(tmp_adev,
+ &tmp_adev->gfx.ras->ras_block.ras_comm);
+ if (r) {
+ dev_err(tmp_adev->dev, "GFX failed to execute ras_late_init! ret:%d\n", r);
+ goto end;
+ }
+ }
+ }
+
amdgpu_ras_resume(tmp_adev);
/* Update PSP FW topology after reset */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index c46c6fbd2..e636c7850 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -999,6 +999,8 @@ struct amdgpu_device {
bool in_s3;
bool in_s4;
bool in_s0ix;
+ /* indicate amdgpu suspension status */
+ bool suspend_complete;
enum pp_mp1_state mp1_state;
struct amdgpu_doorbell_index doorbell_index;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
index 469785d33..1ef758ac5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_fence.c
@@ -90,7 +90,7 @@ struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
return NULL;
fence = container_of(f, struct amdgpu_amdkfd_fence, base);
- if (fence && f->ops == &amdkfd_fence_ops)
+ if (f->ops == &amdkfd_fence_ops)
return fence;
return NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index a5352e5e2..6a4749c0c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1310,6 +1310,7 @@ bool amdgpu_device_need_post(struct amdgpu_device *adev)
return true;
fw_ver = *((uint32_t *)adev->pm.fw->data + 69);
+ release_firmware(adev->pm.fw);
if (fw_ver < 0x00160e00)
return true;
}
@@ -4202,7 +4203,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, true);
cancel_delayed_work_sync(&adev->delayed_init_work);
- flush_delayed_work(&adev->gfx.gfx_off_delay_work);
amdgpu_ras_suspend(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index b9983ca99..f24c3a20e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -2414,6 +2414,7 @@ static int amdgpu_pmops_suspend(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ adev->suspend_complete = false;
if (amdgpu_acpi_is_s0ix_active(adev))
adev->in_s0ix = true;
else if (amdgpu_acpi_is_s3_active(adev))
@@ -2428,6 +2429,7 @@ static int amdgpu_pmops_suspend_noirq(struct device *dev)
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(drm_dev);
+ adev->suspend_complete = true;
if (amdgpu_acpi_should_gpu_reset(adev))
return amdgpu_asic_reset(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 23f0067f9..b803e785d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -585,8 +585,15 @@ void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
if (adev->gfx.gfx_off_req_count == 0 &&
!adev->gfx.gfx_off_state) {
- schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
+ /* If going to s2idle, no need to wait */
+ if (adev->in_s0ix) {
+ if (!amdgpu_dpm_set_powergating_by_smu(adev,
+ AMD_IP_BLOCK_TYPE_GFX, true))
+ adev->gfx.gfx_off_state = true;
+ } else {
+ schedule_delayed_work(&adev->gfx.gfx_off_delay_work,
delay);
+ }
}
} else {
if (adev->gfx.gfx_off_req_count == 0) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
index 2bc791ed8..ea0fb079f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c
@@ -808,19 +808,26 @@ int amdgpu_gmc_vram_checking(struct amdgpu_device *adev)
* seconds, so here, we just pick up three parts for emulation.
*/
ret = memcmp(vram_ptr, cptr, 10);
- if (ret)
- return ret;
+ if (ret) {
+ ret = -EIO;
+ goto release_buffer;
+ }
ret = memcmp(vram_ptr + (size / 2), cptr, 10);
- if (ret)
- return ret;
+ if (ret) {
+ ret = -EIO;
+ goto release_buffer;
+ }
ret = memcmp(vram_ptr + size - 10, cptr, 10);
- if (ret)
- return ret;
+ if (ret) {
+ ret = -EIO;
+ goto release_buffer;
+ }
+release_buffer:
amdgpu_bo_free_kernel(&vram_bo, &vram_gpu,
&vram_ptr);
- return 0;
+ return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 0ee7c935f..cde2fd2f7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -1222,19 +1222,15 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
* amdgpu_bo_move_notify - notification about a memory move
* @bo: pointer to a buffer object
* @evict: if this move is evicting the buffer from the graphics address space
- * @new_mem: new information of the bufer object
*
* Marks the corresponding &amdgpu_bo buffer object as invalid, also performs
* bookkeeping.
* TTM driver callback which is called when ttm moves a buffer.
*/
-void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
- bool evict,
- struct ttm_resource *new_mem)
+void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
- struct ttm_resource *old_mem = bo->resource;
if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
@@ -1251,13 +1247,6 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
/* remember the eviction */
if (evict)
atomic64_inc(&adev->num_evictions);
-
- /* update statistics */
- if (!new_mem)
- return;
-
- /* move_notify is called before move happens */
- trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 6dcd7bab4..2ada421e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -312,9 +312,7 @@ int amdgpu_bo_set_metadata (struct amdgpu_bo *bo, void *metadata,
int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
size_t buffer_size, uint32_t *metadata_size,
uint64_t *flags);
-void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
- bool evict,
- struct ttm_resource *new_mem);
+void amdgpu_bo_move_notify(struct ttm_buffer_object *bo, bool evict);
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index 090e66a1b..54bdbd83a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -191,7 +191,8 @@ static bool amdgpu_sync_test_fence(struct amdgpu_device *adev,
/* Never sync to VM updates either. */
if (fence_owner == AMDGPU_FENCE_OWNER_VM &&
- owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+ owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
+ owner != AMDGPU_FENCE_OWNER_KFD)
return false;
/* Ignore fences depending on the sync mode */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 10469f20a..158b79188 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -555,10 +555,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
return r;
}
+ trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
out:
/* update statistics */
atomic64_add(bo->base.size, &adev->num_bytes_moved);
- amdgpu_bo_move_notify(bo, evict, new_mem);
+ amdgpu_bo_move_notify(bo, evict);
return 0;
}
@@ -1503,7 +1504,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
static void
amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
{
- amdgpu_bo_move_notify(bo, false, NULL);
+ amdgpu_bo_move_notify(bo, false);
}
static struct ttm_device_funcs amdgpu_bo_driver = {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 6e7058a2d..779707f19 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -1110,9 +1110,13 @@ int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
if (err)
return -ENODEV;
+
err = amdgpu_ucode_validate(*fw);
- if (err)
+ if (err) {
dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
+ release_firmware(*fw);
+ *fw = NULL;
+ }
return err;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_ih.c b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
index df385ffc9..6578ca1b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_ih.c
@@ -204,6 +204,12 @@ static u32 cik_ih_get_wptr(struct amdgpu_device *adev,
tmp = RREG32(mmIH_RB_CNTL);
tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(mmIH_RB_CNTL, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+ WREG32(mmIH_RB_CNTL, tmp);
}
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_ih.c b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
index b8c47e0cf..c19681492 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_ih.c
@@ -216,6 +216,11 @@ static u32 cz_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32(mmIH_RB_CNTL, tmp);
out:
return (wptr & ih->ptr_mask);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 84ca601f7..195b29892 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3064,6 +3064,14 @@ static int gfx_v9_0_cp_gfx_start(struct amdgpu_device *adev)
gfx_v9_0_cp_gfx_enable(adev, true);
+ /* Now only limit the quirk on the APU gfx9 series and already
+ * confirmed that the APU gfx10/gfx11 needn't such update.
+ */
+ if (adev->flags & AMD_IS_APU &&
+ adev->in_s3 && !adev->suspend_complete) {
+ DRM_INFO(" Will skip the CSB packet resubmit\n");
+ return 0;
+ }
r = amdgpu_ring_alloc(ring, gfx_v9_0_get_csb_size(adev) + 4 + 3);
if (r) {
DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
index d96ee48e1..35921b41f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c
@@ -1144,6 +1144,10 @@ static int gmc_v10_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+ if (adev->gmc.ecc_irq.funcs &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
index 7124347d2..310a5607d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v11_0.c
@@ -951,6 +951,11 @@ static int gmc_v11_0_hw_fini(void *handle)
}
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+
+ if (adev->gmc.ecc_irq.funcs &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+
gmc_v11_0_gart_disable(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index ec291d28e..345f0c9f5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -921,8 +921,8 @@ static int gmc_v6_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
- else
- return r;
+
+ return 0;
}
static int gmc_v6_0_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 979da6f51..12411f4c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -1110,8 +1110,8 @@ static int gmc_v7_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
- else
- return r;
+
+ return 0;
}
static int gmc_v7_0_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 382dde1ce..cec9926e8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -1240,8 +1240,8 @@ static int gmc_v8_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
- else
- return r;
+
+ return 0;
}
static int gmc_v8_0_hw_fini(void *handle)
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 0d9e9d9dd..3e631aefa 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -1861,8 +1861,8 @@ static int gmc_v9_0_hw_init(void *handle)
if (amdgpu_emu_mode == 1)
return amdgpu_gmc_vram_checking(adev);
- else
- return r;
+
+ return 0;
}
/**
@@ -1900,6 +1900,10 @@ static int gmc_v9_0_hw_fini(void *handle)
amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
+ if (adev->gmc.ecc_irq.funcs &&
+ amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__UMC))
+ amdgpu_irq_put(adev, &adev->gmc.ecc_irq, 0);
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
index aecad530b..2c02ae698 100644
--- a/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/iceland_ih.c
@@ -215,6 +215,11 @@ static u32 iceland_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32(mmIH_RB_CNTL, tmp);
out:
return (wptr & ih->ptr_mask);
diff --git a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
index 7cd79a384..657e4ca6f 100644
--- a/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/ih_v6_0.c
@@ -417,6 +417,12 @@ static u32 ih_v6_0_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index eec13cb5b..84e8e8b00 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -442,6 +442,12 @@ static u32 navi10_ih_get_wptr(struct amdgpu_device *adev,
tmp = RREG32_NO_KIQ(ih_regs->ih_rb_cntl);
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_ih.c b/drivers/gpu/drm/amd/amdgpu/si_ih.c
index 9a24f17a5..cada9f300 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_ih.c
@@ -119,6 +119,12 @@ static u32 si_ih_get_wptr(struct amdgpu_device *adev,
tmp = RREG32(IH_RB_CNTL);
tmp |= IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
WREG32(IH_RB_CNTL, tmp);
+
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp &= ~IH_RB_CNTL__WPTR_OVERFLOW_CLEAR_MASK;
+ WREG32(IH_RB_CNTL, tmp);
}
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 811dd3ea6..489c89465 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -1285,10 +1285,32 @@ static int soc15_common_suspend(void *handle)
return soc15_common_hw_fini(adev);
}
+static bool soc15_need_reset_on_resume(struct amdgpu_device *adev)
+{
+ u32 sol_reg;
+
+ sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81);
+
+ /* Will reset for the following suspend abort cases.
+ * 1) Only reset limit on APU side, dGPU hasn't checked yet.
+ * 2) S3 suspend abort and TOS already launched.
+ */
+ if (adev->flags & AMD_IS_APU && adev->in_s3 &&
+ !adev->suspend_complete &&
+ sol_reg)
+ return true;
+
+ return false;
+}
+
static int soc15_common_resume(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (soc15_need_reset_on_resume(adev)) {
+ dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n");
+ soc15_asic_reset(adev);
+ }
return soc15_common_hw_init(adev);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
index b08905d1c..07a5d95be 100644
--- a/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/tonga_ih.c
@@ -219,6 +219,12 @@ static u32 tonga_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32(mmIH_RB_CNTL, tmp);
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32(mmIH_RB_CNTL, tmp);
+
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
index 1e83db0c5..74c94df42 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega10_ih.c
@@ -373,6 +373,12 @@ static u32 vega10_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
index 59dfca093..f1ba76c35 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_ih.c
@@ -424,6 +424,12 @@ static u32 vega20_ih_get_wptr(struct amdgpu_device *adev,
tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 1);
WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+ /* Unset the CLEAR_OVERFLOW bit immediately so new overflows
+ * can be detected.
+ */
+ tmp = REG_SET_FIELD(tmp, IH_RB_CNTL, WPTR_OVERFLOW_CLEAR, 0);
+ WREG32_NO_KIQ(ih_regs->ih_rb_cntl, tmp);
+
out:
return (wptr & ih->ptr_mask);
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 208812512..7fa5e70f1 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -380,14 +380,9 @@ static void svm_range_bo_release(struct kref *kref)
spin_lock(&svm_bo->list_lock);
}
spin_unlock(&svm_bo->list_lock);
- if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base)) {
- /* We're not in the eviction worker.
- * Signal the fence and synchronize with any
- * pending eviction work.
- */
+ if (!dma_fence_is_signaled(&svm_bo->eviction_fence->base))
+ /* We're not in the eviction worker. Signal the fence. */
dma_fence_signal(&svm_bo->eviction_fence->base);
- cancel_work_sync(&svm_bo->eviction_work);
- }
dma_fence_put(&svm_bo->eviction_fence->base);
amdgpu_bo_unref(&svm_bo->bo);
kfree(svm_bo);
@@ -2246,8 +2241,10 @@ retry:
mutex_unlock(&svms->lock);
mmap_write_unlock(mm);
- /* Pairs with mmget in svm_range_add_list_work */
- mmput(mm);
+ /* Pairs with mmget in svm_range_add_list_work. If dropping the
+ * last mm refcount, schedule release work to avoid circular locking
+ */
+ mmput_async(mm);
spin_lock(&svms->deferred_list_lock);
}
@@ -2556,6 +2553,7 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
{
struct vm_area_struct *vma;
struct interval_tree_node *node;
+ struct rb_node *rb_node;
unsigned long start_limit, end_limit;
vma = find_vma(p->mm, addr << PAGE_SHIFT);
@@ -2578,16 +2576,15 @@ svm_range_get_range_boundaries(struct kfd_process *p, int64_t addr,
if (node) {
end_limit = min(end_limit, node->start);
/* Last range that ends before the fault address */
- node = container_of(rb_prev(&node->rb),
- struct interval_tree_node, rb);
+ rb_node = rb_prev(&node->rb);
} else {
/* Last range must end before addr because
* there was no range after addr
*/
- node = container_of(rb_last(&p->svms.objects.rb_root),
- struct interval_tree_node, rb);
+ rb_node = rb_last(&p->svms.objects.rb_root);
}
- if (node) {
+ if (rb_node) {
+ node = container_of(rb_node, struct interval_tree_node, rb);
if (node->last >= addr) {
WARN(1, "Overlap with prev node and page fault addr\n");
return -EFAULT;
@@ -3310,13 +3307,14 @@ svm_range_trigger_migration(struct mm_struct *mm, struct svm_range *prange,
int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence)
{
- if (!fence)
- return -EINVAL;
-
- if (dma_fence_is_signaled(&fence->base))
- return 0;
-
- if (fence->svm_bo) {
+ /* Dereferencing fence->svm_bo is safe here because the fence hasn't
+ * signaled yet and we're under the protection of the fence->lock.
+ * After the fence is signaled in svm_range_bo_release, we cannot get
+ * here any more.
+ *
+ * Reference is dropped in svm_range_evict_svm_bo_worker.
+ */
+ if (svm_bo_ref_unless_zero(fence->svm_bo)) {
WRITE_ONCE(fence->svm_bo->evicting, 1);
schedule_work(&fence->svm_bo->eviction_work);
}
@@ -3331,8 +3329,6 @@ static void svm_range_evict_svm_bo_worker(struct work_struct *work)
int r = 0;
svm_bo = container_of(work, struct svm_range_bo, eviction_work);
- if (!svm_bo_ref_unless_zero(svm_bo))
- return; /* svm_bo was freed while eviction was pending */
if (mmget_not_zero(svm_bo->eviction_fence->mm)) {
mm = svm_bo->eviction_fence->mm;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 705d9e91b..029916971 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -1513,17 +1513,19 @@ static int kfd_add_peer_prop(struct kfd_topology_device *kdev,
/* CPU->CPU link*/
cpu_dev = kfd_topology_device_by_proximity_domain(iolink1->node_to);
if (cpu_dev) {
- list_for_each_entry(iolink3, &cpu_dev->io_link_props, list)
- if (iolink3->node_to == iolink2->node_to)
- break;
-
- props->weight += iolink3->weight;
- props->min_latency += iolink3->min_latency;
- props->max_latency += iolink3->max_latency;
- props->min_bandwidth = min(props->min_bandwidth,
- iolink3->min_bandwidth);
- props->max_bandwidth = min(props->max_bandwidth,
- iolink3->max_bandwidth);
+ list_for_each_entry(iolink3, &cpu_dev->io_link_props, list) {
+ if (iolink3->node_to != iolink2->node_to)
+ continue;
+
+ props->weight += iolink3->weight;
+ props->min_latency += iolink3->min_latency;
+ props->max_latency += iolink3->max_latency;
+ props->min_bandwidth = min(props->min_bandwidth,
+ iolink3->min_bandwidth);
+ props->max_bandwidth = min(props->max_bandwidth,
+ iolink3->max_bandwidth);
+ break;
+ }
} else {
WARN(1, "CPU node not found");
}
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index f02e509d5..a6c6f286a 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -2255,6 +2255,7 @@ static int dm_sw_fini(void *handle)
if (adev->dm.dmub_srv) {
dmub_srv_destroy(adev->dm.dmub_srv);
+ kfree(adev->dm.dmub_srv);
adev->dm.dmub_srv = NULL;
}
@@ -5937,6 +5938,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
bool scale = dm_state ? (dm_state->scaling != RMX_OFF) : false;
int mode_refresh;
int preferred_refresh = 0;
+ enum color_transfer_func tf = TRANSFER_FUNC_UNKNOWN;
#if defined(CONFIG_DRM_AMD_DC_DCN)
struct dsc_dec_dpcd_caps dsc_caps;
#endif
@@ -6001,7 +6003,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (recalculate_timing) {
freesync_mode = get_highest_refresh_rate_mode(aconnector, false);
drm_mode_copy(&saved_mode, &mode);
+ saved_mode.picture_aspect_ratio = mode.picture_aspect_ratio;
drm_mode_copy(&mode, freesync_mode);
+ mode.picture_aspect_ratio = saved_mode.picture_aspect_ratio;
} else {
decide_crtc_timing_for_drm_display_mode(
&mode, preferred_mode, scale);
@@ -6068,7 +6072,9 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
stream->use_vsc_sdp_for_colorimetry = true;
}
- mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space);
+ if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
+ tf = TRANSFER_FUNC_GAMMA_22;
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
}
@@ -10117,11 +10123,13 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
}
#if defined(CONFIG_DRM_AMD_DC_DCN)
- ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
- if (ret) {
- DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
- ret = -EINVAL;
- goto fail;
+ if (dc_resource_is_dsc_encoding_supported(dc)) {
+ ret = compute_mst_dsc_configs_for_state(state, dm_state->context, vars);
+ if (ret) {
+ DRM_DEBUG_DRIVER("compute_mst_dsc_configs_for_state() failed\n");
+ ret = -EINVAL;
+ goto fail;
+ }
}
ret = dm_update_mst_vcpi_slots_for_dsc(state, dm_state->context, vars);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 7a309547c..f415733f1 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1903,6 +1903,10 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
wait_for_no_pipes_pending(dc, context);
/* pplib is notified if disp_num changed */
dc->hwss.optimize_bandwidth(dc, context);
+ /* Need to do otg sync again as otg could be out of sync due to otg
+ * workaround applied during clock update
+ */
+ dc_trigger_sync(dc, context);
}
if (dc->hwss.update_dsc_pg)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
index af110bf94..aefca9756 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dpcd.c
@@ -202,7 +202,7 @@ enum dc_status core_link_read_dpcd(
uint32_t extended_size;
/* size of the remaining partitioned address space */
uint32_t size_left_to_read;
- enum dc_status status;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
/* size of the next partition to be read from */
uint32_t partition_size;
uint32_t data_index = 0;
@@ -231,7 +231,7 @@ enum dc_status core_link_write_dpcd(
{
uint32_t partition_size;
uint32_t data_index = 0;
- enum dc_status status;
+ enum dc_status status = DC_ERROR_UNEXPECTED;
while (size) {
partition_size = dpcd_get_next_partition_size(address, size);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 66923f510..e2f80cd0c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -3038,6 +3038,12 @@ static void set_avi_info_frame(
hdmi_info.bits.C0_C1 = COLORIMETRY_EXTENDED;
}
+ if (pixel_encoding && color_space == COLOR_SPACE_2020_YCBCR &&
+ stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+ hdmi_info.bits.EC0_EC2 = 0;
+ hdmi_info.bits.C0_C1 = COLORIMETRY_ITU709;
+ }
+
/* TODO: un-hardcode aspect ratio */
aspect = stream->timing.aspect_ratio;
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 46c2b991a..811c11766 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -244,7 +244,7 @@ enum pixel_format {
#define DC_MAX_DIRTY_RECTS 3
struct dc_flip_addrs {
struct dc_plane_address address;
- unsigned int flip_timestamp_in_us;
+ unsigned long long flip_timestamp_in_us;
bool flip_immediate;
/* TODO: add flip duration for FreeSync */
bool triplebuffer_flips;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
index f04595b75..5ec3f50a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c
@@ -1014,7 +1014,7 @@ static struct stream_encoder *dcn301_stream_encoder_create(enum engine_id eng_id
vpg = dcn301_vpg_create(ctx, vpg_inst);
afmt = dcn301_afmt_create(ctx, afmt_inst);
- if (!enc1 || !vpg || !afmt) {
+ if (!enc1 || !vpg || !afmt || eng_id >= ARRAY_SIZE(stream_enc_regs)) {
kfree(enc1);
kfree(vpg);
kfree(afmt);
diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile
index ca7d24000..6c7b286e1 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile
@@ -51,8 +51,12 @@ endif
endif
ifneq ($(CONFIG_FRAME_WARN),0)
+ifeq ($(filter y,$(CONFIG_KASAN)$(CONFIG_KCSAN)),y)
+frame_warn_flag := -Wframe-larger-than=3072
+else
frame_warn_flag := -Wframe-larger-than=2048
endif
+endif
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags)
@@ -60,11 +64,11 @@ ifdef CONFIG_DRM_AMD_DC_DCN
CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn10/dcn10_fpu.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/dcn20_fpu.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20v2.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn20/display_rq_dlg_calc_20v2.o := $(dml_ccflags)
-CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags)
+CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_mode_vba_21.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn21/display_rq_dlg_calc_21.o := $(dml_ccflags)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_mode_vba_30.o := $(dml_ccflags) $(frame_warn_flag)
CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags)
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
index 19f556572..cc8c1a48c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_32.c
@@ -810,6 +810,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman
(v->DRAMSpeedPerState[mode_lib->vba.VoltageLevel] <= MEM_STROBE_FREQ_MHZ ||
v->DCFCLKPerState[mode_lib->vba.VoltageLevel] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
+ mode_lib->vba.PrefetchModePerState[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] > 0 || mode_lib->vba.DRAMClockChangeRequirementFinal == false,
+
/* Output */
&v->DSTXAfterScaler[k],
&v->DSTYAfterScaler[k],
@@ -3291,6 +3293,7 @@ void dml32_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l
v->SwathHeightCThisState[k], v->TWait,
(v->DRAMSpeedPerState[i] <= MEM_STROBE_FREQ_MHZ || v->DCFCLKState[i][j] <= DCFCLK_FREQ_EXTRA_PREFETCH_REQ_MHZ) ?
mode_lib->vba.ip.min_prefetch_in_strobe_us : 0,
+ mode_lib->vba.PrefetchModePerState[i][j] > 0 || mode_lib->vba.DRAMClockChangeRequirementFinal == false,
/* Output */
&v->dummy_vars.dml32_ModeSupportAndSystemConfigurationFull.DSTXAfterScaler[k],
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
index 23e4be2ad..7f4fc49be 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.c
@@ -3418,6 +3418,7 @@ bool dml32_CalculatePrefetchSchedule(
unsigned int SwathHeightC,
double TWait,
double TPreReq,
+ bool ExtendPrefetchIfPossible,
/* Output */
double *DSTXAfterScaler,
double *DSTYAfterScaler,
@@ -3887,12 +3888,32 @@ bool dml32_CalculatePrefetchSchedule(
/* Clamp to oto for bandwidth calculation */
LinesForPrefetchBandwidth = dst_y_prefetch_oto;
} else {
- *DestinationLinesForPrefetch = dst_y_prefetch_equ;
- TimeForFetchingMetaPTE = Tvm_equ;
- TimeForFetchingRowInVBlank = Tr0_equ;
- *PrefetchBandwidth = prefetch_bw_equ;
- /* Clamp to equ for bandwidth calculation */
- LinesForPrefetchBandwidth = dst_y_prefetch_equ;
+ /* For mode programming we want to extend the prefetch as much as possible
+ * (up to oto, or as long as we can for equ) if we're not already applying
+ * the 60us prefetch requirement. This is to avoid intermittent underflow
+ * issues during prefetch.
+ *
+ * The prefetch extension is applied under the following scenarios:
+ * 1. We're in prefetch mode > 0 (i.e. we don't support MCLK switch in blank)
+ * 2. We're using subvp or drr methods of p-state switch, in which case we
+ * we don't care if prefetch takes up more of the blanking time
+ *
+ * Mode programming typically chooses the smallest prefetch time possible
+ * (i.e. highest bandwidth during prefetch) presumably to create margin between
+ * p-states / c-states that happen in vblank and prefetch. Therefore we only
+ * apply this prefetch extension when p-state in vblank is not required (UCLK
+ * p-states take up the most vblank time).
+ */
+ if (ExtendPrefetchIfPossible && TPreReq == 0 && VStartup < MaxVStartup) {
+ MyError = true;
+ } else {
+ *DestinationLinesForPrefetch = dst_y_prefetch_equ;
+ TimeForFetchingMetaPTE = Tvm_equ;
+ TimeForFetchingRowInVBlank = Tr0_equ;
+ *PrefetchBandwidth = prefetch_bw_equ;
+ /* Clamp to equ for bandwidth calculation */
+ LinesForPrefetchBandwidth = dst_y_prefetch_equ;
+ }
}
*DestinationLinesToRequestVMInVBlank = dml_ceil(4.0 * TimeForFetchingMetaPTE / LineTime, 1.0) / 4.0;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
index 779c6805f..1823434d8 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/display_mode_vba_util_32.h
@@ -744,6 +744,7 @@ bool dml32_CalculatePrefetchSchedule(
unsigned int SwathHeightC,
double TWait,
double TPreReq,
+ bool ExtendPrefetchIfPossible,
/* Output */
double *DSTXAfterScaler,
double *DSTYAfterScaler,
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index 1d8b746b0..edf5845f6 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -35,7 +35,8 @@ struct mod_vrr_params;
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet,
- enum dc_color_space cs);
+ enum dc_color_space cs,
+ enum color_transfer_func tf);
void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet);
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 27ceba9d6..69691058a 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -132,7 +132,8 @@ enum ColorimetryYCCDP {
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet,
- enum dc_color_space cs)
+ enum dc_color_space cs,
+ enum color_transfer_func tf)
{
unsigned int vsc_packet_revision = vsc_packet_undefined;
unsigned int i;
@@ -382,6 +383,9 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
colorimetryFormat = ColorimetryYCC_DP_AdobeYCC;
else if (cs == COLOR_SPACE_2020_YCBCR)
colorimetryFormat = ColorimetryYCC_DP_ITU2020YCbCr;
+
+ if (cs == COLOR_SPACE_2020_YCBCR && tf == TRANSFER_FUNC_GAMMA_22)
+ colorimetryFormat = ColorimetryYCC_DP_ITU709;
break;
default:
diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
index e2f436dea..67287ad07 100644
--- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
+++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c
@@ -818,8 +818,6 @@ bool is_psr_su_specific_panel(struct dc_link *link)
isPSRSUSupported = false;
else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
isPSRSUSupported = false;
- else if (dpcd_caps->sink_dev_id_str[1] == 0x08 && dpcd_caps->sink_dev_id_str[0] == 0x03)
- isPSRSUSupported = false;
else if (dpcd_caps->psr_info.force_psrsu_cap == 0x1)
isPSRSUSupported = true;
}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index dc0a6fba7..ff1032de4 100644
--- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -6925,6 +6925,23 @@ static int si_dpm_enable(struct amdgpu_device *adev)
return 0;
}
+static int si_set_temperature_range(struct amdgpu_device *adev)
+{
+ int ret;
+
+ ret = si_thermal_enable_alert(adev, false);
+ if (ret)
+ return ret;
+ ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
+ if (ret)
+ return ret;
+ ret = si_thermal_enable_alert(adev, true);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
static void si_dpm_disable(struct amdgpu_device *adev)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -7608,6 +7625,18 @@ static int si_dpm_process_interrupt(struct amdgpu_device *adev,
static int si_dpm_late_init(void *handle)
{
+ int ret;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!adev->pm.dpm_enabled)
+ return 0;
+
+ ret = si_set_temperature_range(adev);
+ if (ret)
+ return ret;
+#if 0 //TODO ?
+ si_dpm_powergate_uvd(adev, true);
+#endif
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
index f2a55c141..17882f8df 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/process_pptables_v1_0.c
@@ -200,7 +200,7 @@ static int get_platform_power_management_table(
struct pp_hwmgr *hwmgr,
ATOM_Tonga_PPM_Table *atom_ppm_table)
{
- struct phm_ppm_table *ptr = kzalloc(sizeof(ATOM_Tonga_PPM_Table), GFP_KERNEL);
+ struct phm_ppm_table *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
struct phm_ppt_v1_information *pp_table_information =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.c b/drivers/gpu/drm/bridge/analogix/anx7625.c
index 5f8137e9c..77a304ac4 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.c
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.c
@@ -1300,10 +1300,32 @@ static void anx7625_config(struct anx7625_data *ctx)
XTAL_FRQ_SEL, XTAL_FRQ_27M);
}
+static int anx7625_hpd_timer_config(struct anx7625_data *ctx)
+{
+ int ret;
+
+ /* Set irq detect window to 2ms */
+ ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ HPD_DET_TIMER_BIT8_15,
+ (HPD_TIME >> 8) & 0xFF);
+ ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
+ HPD_DET_TIMER_BIT16_23,
+ (HPD_TIME >> 16) & 0xFF);
+
+ return ret;
+}
+
+static int anx7625_read_hpd_gpio_config_status(struct anx7625_data *ctx)
+{
+ return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, GPIO_CTRL_2);
+}
+
static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
{
struct device *dev = &ctx->client->dev;
- int ret;
+ int ret, val;
/* Reset main ocm */
ret = anx7625_reg_write(ctx, ctx->i2c.rx_p0_client, 0x88, 0x40);
@@ -1317,6 +1339,19 @@ static void anx7625_disable_pd_protocol(struct anx7625_data *ctx)
DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature fail.\n");
else
DRM_DEV_DEBUG_DRIVER(dev, "disable PD feature succeeded.\n");
+
+ /*
+ * Make sure the HPD GPIO already be configured after OCM release before
+ * setting HPD detect window register. Here we poll the status register
+ * at maximum 40ms, then config HPD irq detect window register
+ */
+ readx_poll_timeout(anx7625_read_hpd_gpio_config_status,
+ ctx, val,
+ ((val & HPD_SOURCE) || (val < 0)),
+ 2000, 2000 * 20);
+
+ /* Set HPD irq detect window to 2ms */
+ anx7625_hpd_timer_config(ctx);
}
static int anx7625_ocm_loading_check(struct anx7625_data *ctx)
@@ -1440,20 +1475,6 @@ static void anx7625_start_dp_work(struct anx7625_data *ctx)
static int anx7625_read_hpd_status_p0(struct anx7625_data *ctx)
{
- int ret;
-
- /* Set irq detect window to 2ms */
- ret = anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
- HPD_DET_TIMER_BIT0_7, HPD_TIME & 0xFF);
- ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
- HPD_DET_TIMER_BIT8_15,
- (HPD_TIME >> 8) & 0xFF);
- ret |= anx7625_reg_write(ctx, ctx->i2c.tx_p2_client,
- HPD_DET_TIMER_BIT16_23,
- (HPD_TIME >> 16) & 0xFF);
- if (ret < 0)
- return ret;
-
return anx7625_reg_read(ctx, ctx->i2c.rx_p0_client, SYSTEM_STSTUS);
}
diff --git a/drivers/gpu/drm/bridge/analogix/anx7625.h b/drivers/gpu/drm/bridge/analogix/anx7625.h
index 239956199..164250c8c 100644
--- a/drivers/gpu/drm/bridge/analogix/anx7625.h
+++ b/drivers/gpu/drm/bridge/analogix/anx7625.h
@@ -259,6 +259,10 @@
#define AP_MIPI_RX_EN BIT(5) /* 1: MIPI RX input in 0: no RX in */
#define AP_DISABLE_PD BIT(6)
#define AP_DISABLE_DISPLAY BIT(7)
+
+#define GPIO_CTRL_2 0x49
+#define HPD_SOURCE BIT(6)
+
/***************************************************************/
/* Register definition of device address 0x84 */
#define MIPI_PHY_CONTROL_3 0x03
diff --git a/drivers/gpu/drm/drm_buddy.c b/drivers/gpu/drm/drm_buddy.c
index 7098f125b..fd32041f8 100644
--- a/drivers/gpu/drm/drm_buddy.c
+++ b/drivers/gpu/drm/drm_buddy.c
@@ -332,6 +332,7 @@ alloc_range_bias(struct drm_buddy *mm,
u64 start, u64 end,
unsigned int order)
{
+ u64 req_size = mm->chunk_size << order;
struct drm_buddy_block *block;
struct drm_buddy_block *buddy;
LIST_HEAD(dfs);
@@ -367,6 +368,15 @@ alloc_range_bias(struct drm_buddy *mm,
if (drm_buddy_block_is_allocated(block))
continue;
+ if (block_start < start || block_end > end) {
+ u64 adjusted_start = max(block_start, start);
+ u64 adjusted_end = min(block_end, end);
+
+ if (round_down(adjusted_end + 1, req_size) <=
+ round_up(adjusted_start, req_size))
+ continue;
+ }
+
if (contains(start, end, block_start, block_end) &&
order == drm_buddy_block_order(block)) {
/*
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index a8b4d918e..d6a057298 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -406,7 +406,7 @@ int drm_open(struct inode *inode, struct file *filp)
{
struct drm_device *dev;
struct drm_minor *minor;
- int retcode;
+ int retcode = 0;
int need_setup = 0;
minor = drm_minor_acquire(iminor(inode));
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 2dd97473c..72ad1715f 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -570,7 +570,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev,
struct drm_mode_fb_cmd2 *r = data;
struct drm_framebuffer *fb;
unsigned int i;
- int ret;
+ int ret = 0;
if (!drm_core_check_feature(dev, DRIVER_MODESET))
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index 853208e8d..ef7ec6886 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -346,7 +346,8 @@ static int mipi_dsi_remove_device_fn(struct device *dev, void *priv)
{
struct mipi_dsi_device *dsi = to_mipi_dsi_device(dev);
- mipi_dsi_detach(dsi);
+ if (dsi->attached)
+ mipi_dsi_detach(dsi);
mipi_dsi_device_unregister(dsi);
return 0;
@@ -369,11 +370,18 @@ EXPORT_SYMBOL(mipi_dsi_host_unregister);
int mipi_dsi_attach(struct mipi_dsi_device *dsi)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ int ret;
if (!ops || !ops->attach)
return -ENOSYS;
- return ops->attach(dsi->host, dsi);
+ ret = ops->attach(dsi->host, dsi);
+ if (ret)
+ return ret;
+
+ dsi->attached = true;
+
+ return 0;
}
EXPORT_SYMBOL(mipi_dsi_attach);
@@ -385,9 +393,14 @@ int mipi_dsi_detach(struct mipi_dsi_device *dsi)
{
const struct mipi_dsi_host_ops *ops = dsi->host->ops;
+ if (WARN_ON(!dsi->attached))
+ return -EINVAL;
+
if (!ops || !ops->detach)
return -ENOSYS;
+ dsi->attached = false;
+
return ops->detach(dsi->host, dsi);
}
EXPORT_SYMBOL(mipi_dsi_detach);
diff --git a/drivers/gpu/drm/drm_prime.c b/drivers/gpu/drm/drm_prime.c
index eb09e8604..68a6d4b0e 100644
--- a/drivers/gpu/drm/drm_prime.c
+++ b/drivers/gpu/drm/drm_prime.c
@@ -828,7 +828,7 @@ struct sg_table *drm_prime_pages_to_sg(struct drm_device *dev,
if (max_segment == 0)
max_segment = UINT_MAX;
err = sg_alloc_table_from_pages_segment(sg, pages, nr_pages, 0,
- nr_pages << PAGE_SHIFT,
+ (unsigned long)nr_pages << PAGE_SHIFT,
max_segment, GFP_KERNEL);
if (err) {
kfree(sg);
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index da0145bc1..8f2737075 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -980,7 +980,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
uint64_t *points;
uint32_t signaled_count, i;
- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT)
+ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
lockdep_assert_none_held_once();
points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
@@ -1049,7 +1050,8 @@ static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
* fallthough and try a 0 timeout wait!
*/
- if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
+ if (flags & (DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+ DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE)) {
for (i = 0; i < count; ++i)
drm_syncobj_fence_add_wait(syncobjs[i], &entries[i]);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 16c539657..4095b0d3a 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -309,6 +309,7 @@ err_mode_config_cleanup:
drm_mode_config_cleanup(drm);
exynos_drm_cleanup_dma(drm);
kfree(private);
+ dev_set_drvdata(dev, NULL);
err_free_drm:
drm_dev_put(drm);
@@ -323,6 +324,7 @@ static void exynos_drm_unbind(struct device *dev)
exynos_drm_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
+ drm_atomic_helper_shutdown(drm);
component_unbind_all(drm->dev, drm);
drm_mode_config_cleanup(drm);
@@ -360,9 +362,18 @@ static int exynos_drm_platform_remove(struct platform_device *pdev)
return 0;
}
+static void exynos_drm_platform_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *drm = platform_get_drvdata(pdev);
+
+ if (drm)
+ drm_atomic_helper_shutdown(drm);
+}
+
static struct platform_driver exynos_drm_platform_driver = {
.probe = exynos_drm_platform_probe,
.remove = exynos_drm_platform_remove,
+ .shutdown = exynos_drm_platform_shutdown,
.driver = {
.name = "exynos-drm",
.pm = &exynos_drm_pm_ops,
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index daac2050d..6f531bb61 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -2844,8 +2844,7 @@ static int handle_mmio(struct intel_gvt_mmio_table_iter *iter, u32 offset,
for (i = start; i < end; i += 4) {
p = intel_gvt_find_mmio_info(gvt, i);
if (p) {
- WARN(1, "dup mmio definition offset %x\n",
- info->offset);
+ WARN(1, "dup mmio definition offset %x\n", i);
/* We return -EEXIST here to make GVT-g load fail.
* So duplicated MMIO can be found as soon as
diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c
index 119544d88..fbac39aa3 100644
--- a/drivers/gpu/drm/meson/meson_drv.c
+++ b/drivers/gpu/drm/meson/meson_drv.c
@@ -316,32 +316,34 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
goto exit_afbcd;
if (has_components) {
- ret = component_bind_all(drm->dev, drm);
+ ret = component_bind_all(dev, drm);
if (ret) {
dev_err(drm->dev, "Couldn't bind all components\n");
+ /* Do not try to unbind */
+ has_components = false;
goto exit_afbcd;
}
}
ret = meson_encoder_hdmi_init(priv);
if (ret)
- goto unbind_all;
+ goto exit_afbcd;
ret = meson_plane_create(priv);
if (ret)
- goto unbind_all;
+ goto exit_afbcd;
ret = meson_overlay_create(priv);
if (ret)
- goto unbind_all;
+ goto exit_afbcd;
ret = meson_crtc_create(priv);
if (ret)
- goto unbind_all;
+ goto exit_afbcd;
ret = request_irq(priv->vsync_irq, meson_irq, 0, drm->driver->name, drm);
if (ret)
- goto unbind_all;
+ goto exit_afbcd;
drm_mode_config_reset(drm);
@@ -359,15 +361,18 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
uninstall_irq:
free_irq(priv->vsync_irq, drm);
-unbind_all:
- if (has_components)
- component_unbind_all(drm->dev, drm);
exit_afbcd:
if (priv->afbcd.ops)
priv->afbcd.ops->exit(priv);
free_drm:
drm_dev_put(drm);
+ meson_encoder_hdmi_remove(priv);
+ meson_encoder_cvbs_remove(priv);
+
+ if (has_components)
+ component_unbind_all(dev, drm);
+
return ret;
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_cvbs.c b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
index 3f73b211f..340745043 100644
--- a/drivers/gpu/drm/meson/meson_encoder_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_encoder_cvbs.c
@@ -294,6 +294,5 @@ void meson_encoder_cvbs_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_CVBS]) {
meson_encoder_cvbs = priv->encoders[MESON_ENC_CVBS];
drm_bridge_remove(&meson_encoder_cvbs->bridge);
- drm_bridge_remove(meson_encoder_cvbs->next_bridge);
}
}
diff --git a/drivers/gpu/drm/meson/meson_encoder_hdmi.c b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
index b14e6e507..03062e7a0 100644
--- a/drivers/gpu/drm/meson/meson_encoder_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_encoder_hdmi.c
@@ -472,6 +472,5 @@ void meson_encoder_hdmi_remove(struct meson_drm *priv)
if (priv->encoders[MESON_ENC_HDMI]) {
meson_encoder_hdmi = priv->encoders[MESON_ENC_HDMI];
drm_bridge_remove(&meson_encoder_hdmi->bridge);
- drm_bridge_remove(meson_encoder_hdmi->next_bridge);
}
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index b0eb881f8..25245ef38 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -38,6 +38,9 @@
#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
(e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+#define DPU_ERROR_ENC_RATELIMITED(e, fmt, ...) DPU_ERROR_RATELIMITED("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
/*
* Two to anticipate panels that can do cmd/vid dynamic switching
* plan is to create all possible physical encoder types, and switch between
@@ -2050,7 +2053,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
}
/* reset the merge 3D HW block */
- if (phys_enc->hw_pp->merge_3d) {
+ if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d) {
phys_enc->hw_pp->merge_3d->ops.setup_3d_mode(phys_enc->hw_pp->merge_3d,
BLEND_3D_NONE);
if (phys_enc->hw_ctl->ops.update_pending_flush_merge_3d)
@@ -2066,7 +2069,7 @@ void dpu_encoder_helper_phys_cleanup(struct dpu_encoder_phys *phys_enc)
if (phys_enc->hw_wb)
intf_cfg.wb = phys_enc->hw_wb->idx;
- if (phys_enc->hw_pp->merge_3d)
+ if (phys_enc->hw_pp && phys_enc->hw_pp->merge_3d)
intf_cfg.merge_3d = phys_enc->hw_pp->merge_3d->idx;
if (ctl->ops.reset_intf_cfg)
@@ -2385,7 +2388,7 @@ static void dpu_encoder_frame_done_timeout(struct timer_list *t)
return;
}
- DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
+ DPU_ERROR_ENC_RATELIMITED(dpu_enc, "frame done timeout\n");
event = DPU_ENCODER_FRAME_EVENT_ERROR;
trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
index a3e413d27..63dc2ee44 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_wb.c
@@ -105,6 +105,9 @@ static void dpu_hw_wb_setup_format(struct dpu_hw_wb *ctx,
dst_format |= BIT(14); /* DST_ALPHA_X */
}
+ if (DPU_FORMAT_IS_YUV(fmt))
+ dst_format |= BIT(15);
+
pattern = (fmt->element[3] << 24) |
(fmt->element[2] << 16) |
(fmt->element[1] << 8) |
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
index ed80ed678..bb35aa5f5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -51,6 +51,7 @@
} while (0)
#define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
+#define DPU_ERROR_RATELIMITED(fmt, ...) pr_err_ratelimited("[dpu error]" fmt, ##__VA_ARGS__)
/**
* ktime_compare_safe - compare two ktime structures
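
Editor's note: DPU_ERROR_RATELIMITED forwards to the kernel's pr_err_ratelimited(), which suppresses messages once they arrive faster than a threshold, so a stuck encoder cannot flood the log with "frame done timeout". A rough userspace approximation of the idea (a coarse once-per-interval gate; the kernel version is burst/token based and counts suppressed messages), with invented names:

#include <stdio.h>
#include <time.h>

#define LOG_INTERVAL_SEC 5

/* Log at most once per LOG_INTERVAL_SEC per call site. */
#define log_err_ratelimited(fmt, ...)					\
	do {								\
		static time_t __last;					\
		time_t __now = time(NULL);				\
		if (__now - __last >= LOG_INTERVAL_SEC) {		\
			__last = __now;					\
			fprintf(stderr, "[error] " fmt, ##__VA_ARGS__);	\
		}							\
	} while (0)

int main(void)
{
	for (int i = 0; i < 1000000; i++)
		log_err_ratelimited("frame done timeout (iteration %d)\n", i);
	return 0;
}
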
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 103eef9f0..b20701893 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -133,11 +133,6 @@ static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
tbd = dp_link_get_test_bits_depth(ctrl->link,
ctrl->panel->dp_mode.bpp);
- if (tbd == DP_TEST_BIT_DEPTH_UNKNOWN) {
- pr_debug("BIT_DEPTH not set. Configure default\n");
- tbd = DP_TEST_BIT_DEPTH_8;
- }
-
config |= tbd << DP_CONFIGURATION_CTRL_BPC_SHIFT;
/* Num of Lanes */
diff --git a/drivers/gpu/drm/msm/dp/dp_link.c b/drivers/gpu/drm/msm/dp/dp_link.c
index cb66d1126..ceb382fa5 100644
--- a/drivers/gpu/drm/msm/dp/dp_link.c
+++ b/drivers/gpu/drm/msm/dp/dp_link.c
@@ -7,6 +7,7 @@
#include <drm/drm_print.h>
+#include "dp_reg.h"
#include "dp_link.h"
#include "dp_panel.h"
@@ -1075,7 +1076,7 @@ int dp_link_process_request(struct dp_link *dp_link)
int dp_link_get_colorimetry_config(struct dp_link *dp_link)
{
- u32 cc;
+ u32 cc = DP_MISC0_COLORIMERY_CFG_LEGACY_RGB;
struct dp_link_private *link;
if (!dp_link) {
@@ -1089,10 +1090,11 @@ int dp_link_get_colorimetry_config(struct dp_link *dp_link)
* Unless a video pattern CTS test is ongoing, use RGB_VESA
* Only RGB_VESA and RGB_CEA supported for now
*/
- if (dp_link_is_video_pattern_requested(link))
- cc = link->dp_link.test_video.test_dyn_range;
- else
- cc = DP_TEST_DYNAMIC_RANGE_VESA;
+ if (dp_link_is_video_pattern_requested(link)) {
+ if (link->dp_link.test_video.test_dyn_range &
+ DP_TEST_DYNAMIC_RANGE_CEA)
+ cc = DP_MISC0_COLORIMERY_CFG_CEA_RGB;
+ }
return cc;
}
@@ -1172,6 +1174,9 @@ void dp_link_reset_phy_params_vx_px(struct dp_link *dp_link)
u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
{
u32 tbd;
+ struct dp_link_private *link;
+
+ link = container_of(dp_link, struct dp_link_private, dp_link);
/*
* Few simplistic rules and assumptions made here:
@@ -1189,12 +1194,13 @@ u32 dp_link_get_test_bits_depth(struct dp_link *dp_link, u32 bpp)
tbd = DP_TEST_BIT_DEPTH_10;
break;
default:
- tbd = DP_TEST_BIT_DEPTH_UNKNOWN;
+ drm_dbg_dp(link->drm_dev, "bpp=%d not supported, use bpc=8\n",
+ bpp);
+ tbd = DP_TEST_BIT_DEPTH_8;
break;
}
- if (tbd != DP_TEST_BIT_DEPTH_UNKNOWN)
- tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
+ tbd = (tbd >> DP_TEST_BIT_DEPTH_SHIFT);
return tbd;
}
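
Editor's note: dp_link_get_test_bits_depth() now needs the private wrapper in order to reach drm_dev for drm_dbg_dp(), which is what the added container_of() call recovers. A self-contained illustration of the container_of() idea itself; the structure names below are made up, not the driver's:

#include <stddef.h>
#include <stdio.h>

/* Same idea as the kernel's container_of(): go from a pointer to a
 * member back to the structure that embeds it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct public_link {			/* stand-in for struct dp_link */
	int rate;
};

struct private_link {			/* stand-in for struct dp_link_private */
	const char *name;
	struct public_link link;	/* embedded, handed out to callers */
};

static void dump(struct public_link *pub)
{
	struct private_link *priv = container_of(pub, struct private_link, link);

	printf("%s: rate=%d\n", priv->name, pub->rate);
}

int main(void)
{
	struct private_link l = { .name = "dp0", .link = { .rate = 540000 } };

	dump(&l.link);
	return 0;
}
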
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
index 268602803..176a503ec 100644
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -129,6 +129,9 @@
#define DP_MISC0_COLORIMETRY_CFG_SHIFT (0x00000001)
#define DP_MISC0_TEST_BITS_DEPTH_SHIFT (0x00000005)
+#define DP_MISC0_COLORIMERY_CFG_LEGACY_RGB (0)
+#define DP_MISC0_COLORIMERY_CFG_CEA_RGB (0x04)
+
#define REG_DP_VALID_BOUNDARY (0x00000030)
#define REG_DP_VALID_BOUNDARY_2 (0x00000034)
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 62bc3756f..c0bcf020e 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -673,6 +673,10 @@ static int dsi_phy_driver_probe(struct platform_device *pdev)
return dev_err_probe(dev, PTR_ERR(phy->ahb_clk),
"Unable to get ahb clk\n");
+ ret = devm_pm_runtime_enable(&pdev->dev);
+ if (ret)
+ return ret;
+
/* PLL init will call into clk_register which requires
* register access, so we need to enable power and ahb clock.
*/
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index d12ba47b3..0de361213 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -21,6 +21,8 @@ struct msm_iommu_pagetable {
struct msm_mmu base;
struct msm_mmu *parent;
struct io_pgtable_ops *pgtbl_ops;
+ const struct iommu_flush_ops *tlb;
+ struct device *iommu_dev;
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
phys_addr_t ttbr;
u32 asid;
@@ -194,11 +196,33 @@ static const struct msm_mmu_funcs pagetable_funcs = {
static void msm_iommu_tlb_flush_all(void *cookie)
{
+ struct msm_iommu_pagetable *pagetable = cookie;
+ struct adreno_smmu_priv *adreno_smmu;
+
+ if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
+ return;
+
+ adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
+
+ pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie);
+
+ pm_runtime_put_autosuspend(pagetable->iommu_dev);
}
static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
size_t granule, void *cookie)
{
+ struct msm_iommu_pagetable *pagetable = cookie;
+ struct adreno_smmu_priv *adreno_smmu;
+
+ if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
+ return;
+
+ adreno_smmu = dev_get_drvdata(pagetable->parent->dev);
+
+ pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie);
+
+ pm_runtime_put_autosuspend(pagetable->iommu_dev);
}
static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
@@ -206,7 +230,7 @@ static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
{
}
-static const struct iommu_flush_ops null_tlb_ops = {
+static const struct iommu_flush_ops tlb_ops = {
.tlb_flush_all = msm_iommu_tlb_flush_all,
.tlb_flush_walk = msm_iommu_tlb_flush_walk,
.tlb_add_page = msm_iommu_tlb_add_page,
@@ -254,10 +278,10 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
/* The incoming cfg will have the TTBR1 quirk enabled */
ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
- ttbr0_cfg.tlb = &null_tlb_ops;
+ ttbr0_cfg.tlb = &tlb_ops;
pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
- &ttbr0_cfg, iommu->domain);
+ &ttbr0_cfg, pagetable);
if (!pagetable->pgtbl_ops) {
kfree(pagetable);
@@ -282,6 +306,8 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
/* Needed later for TLB flush */
pagetable->parent = parent;
+ pagetable->tlb = ttbr1_cfg->tlb;
+ pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;
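
Editor's note: the io-pgtable cookie now points at the pagetable itself, so each flush callback can recover its own state, check that the device is actually powered, and forward to the parent's flush ops. A toy sketch of that cookie/forwarding shape only (no IOMMU or runtime PM involved; every name here is illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Ops table whose callbacks receive an opaque cookie. */
struct flush_ops {
	void (*flush_all)(void *cookie);
};

struct parent_ctx {
	const char *name;
};

static void parent_flush_all(void *cookie)
{
	struct parent_ctx *parent = cookie;

	printf("%s: flushing everything\n", parent->name);
}

static const struct flush_ops parent_ops = { .flush_all = parent_flush_all };

/* The child keeps pointers to the parent's ops and context so its own
 * callback can delegate, gated on some runtime condition. */
struct child_ctx {
	const struct flush_ops *parent_ops;
	struct parent_ctx *parent;
	bool powered;
};

static void child_flush_all(void *cookie)
{
	struct child_ctx *child = cookie;

	if (!child->powered)	/* akin to pm_runtime_get_if_in_use() failing */
		return;

	child->parent_ops->flush_all(child->parent);
}

static const struct flush_ops child_ops = { .flush_all = child_flush_all };

int main(void)
{
	struct parent_ctx parent = { .name = "parent-smmu" };
	struct child_ctx child = {
		.parent_ops = &parent_ops,
		.parent = &parent,
		.powered = true,
	};

	child_ops.flush_all(&child);	/* the cookie is the child itself */
	return 0;
}
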
diff --git a/drivers/gpu/drm/nouveau/nouveau_svm.c b/drivers/gpu/drm/nouveau/nouveau_svm.c
index 31a5b81ee..be6674fb1 100644
--- a/drivers/gpu/drm/nouveau/nouveau_svm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_svm.c
@@ -997,7 +997,7 @@ nouveau_svm_fault_buffer_ctor(struct nouveau_svm *svm, s32 oclass, int id)
if (ret)
return ret;
- buffer->fault = kvcalloc(sizeof(*buffer->fault), buffer->entries, GFP_KERNEL);
+ buffer->fault = kvcalloc(buffer->entries, sizeof(*buffer->fault), GFP_KERNEL);
if (!buffer->fault)
return -ENOMEM;
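
Editor's note: kvcalloc(), like userspace calloc(), is documented as taking the element count first and the element size second. With the arguments transposed the allocation size comes out the same, but the call no longer matches the documented signature and can confuse allocation-size annotations and newer compiler diagnostics. The calloc() analogue, with an invented element type:

#include <stdio.h>
#include <stdlib.h>

struct fault {			/* stand-in for the driver's fault entry */
	unsigned long addr;
	int engine;
};

int main(void)
{
	size_t entries = 128;

	/* calloc(nmemb, size): count first, element size second. */
	struct fault *faults = calloc(entries, sizeof(*faults));

	if (!faults)
		return 1;

	faults[0].addr = 0x1000;
	printf("allocated %zu entries of %zu bytes each\n",
	       entries, sizeof(*faults));
	free(faults);
	return 0;
}
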
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
index 19188683c..8c2bf1c16 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadow.c
@@ -154,11 +154,17 @@ shadow_fw_init(struct nvkm_bios *bios, const char *name)
return (void *)fw;
}
+static void
+shadow_fw_release(void *fw)
+{
+ release_firmware(fw);
+}
+
static const struct nvbios_source
shadow_fw = {
.name = "firmware",
.init = shadow_fw_init,
- .fini = (void(*)(void *))release_firmware,
+ .fini = shadow_fw_release,
.read = shadow_fw_read,
.rw = false,
};
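
Editor's note: calling a function through a pointer whose type does not match the function's actual prototype is undefined behaviour (and is rejected outright under control-flow-integrity schemes), which is why the cast of release_firmware() is replaced with a one-line wrapper that has the exact .fini signature. The same pattern in plain C, using fclose() as the mismatched callee:

#include <stdio.h>

/* Callback slot that expects: void (*)(void *) */
struct source {
	void (*fini)(void *handle);
};

/*
 * Wrong: (void (*)(void *))fclose would call fclose() through an
 * incompatible pointer type, which is undefined behaviour.
 * Right: a thin wrapper with exactly the expected signature.
 */
static void close_stream(void *handle)
{
	fclose(handle);
}

static const struct source file_source = {
	.fini = close_stream,
};

int main(void)
{
	FILE *f = fopen("/dev/null", "r");

	if (f)
		file_source.fini(f);
	return 0;
}
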
diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c
index ecd623874..42584d8a9 100644
--- a/drivers/gpu/drm/panel/panel-edp.c
+++ b/drivers/gpu/drm/panel/panel-edp.c
@@ -203,6 +203,9 @@ struct edp_panel_entry {
/** @name: Name of this panel (for printing to logs). */
const char *name;
+
+ /** @override_edid_mode: Override the mode obtained by edid. */
+ const struct drm_display_mode *override_edid_mode;
};
struct panel_edp {
@@ -301,6 +304,24 @@ static unsigned int panel_edp_get_display_modes(struct panel_edp *panel,
return num;
}
+static int panel_edp_override_edid_mode(struct panel_edp *panel,
+ struct drm_connector *connector,
+ const struct drm_display_mode *override_mode)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, override_mode);
+ if (!mode) {
+ dev_err(panel->base.dev, "failed to add additional mode\n");
+ return 0;
+ }
+
+ mode->type |= DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ return 1;
+}
+
static int panel_edp_get_non_edid_modes(struct panel_edp *panel,
struct drm_connector *connector)
{
@@ -568,6 +589,9 @@ static int panel_edp_get_modes(struct drm_panel *panel,
{
struct panel_edp *p = to_panel_edp(panel);
int num = 0;
+ bool has_override_edid_mode = p->detected_panel &&
+ p->detected_panel != ERR_PTR(-EINVAL) &&
+ p->detected_panel->override_edid_mode;
/* probe EDID if a DDC bus is available */
if (p->ddc) {
@@ -575,9 +599,18 @@ static int panel_edp_get_modes(struct drm_panel *panel,
if (!p->edid)
p->edid = drm_get_edid(connector, p->ddc);
-
- if (p->edid)
- num += drm_add_edid_modes(connector, p->edid);
+ if (p->edid) {
+ if (has_override_edid_mode) {
+ /*
+ * override_edid_mode is specified. Use
+ * override_edid_mode instead of from edid.
+ */
+ num += panel_edp_override_edid_mode(p, connector,
+ p->detected_panel->override_edid_mode);
+ } else {
+ num += drm_add_edid_modes(connector, p->edid);
+ }
+ }
pm_runtime_mark_last_busy(panel->dev);
pm_runtime_put_autosuspend(panel->dev);
@@ -1861,6 +1894,15 @@ static const struct panel_delay delay_200_500_e200 = {
.delay = _delay \
}
+#define EDP_PANEL_ENTRY2(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name, _mode) \
+{ \
+ .name = _name, \
+ .panel_id = drm_edid_encode_panel_id(vend_chr_0, vend_chr_1, vend_chr_2, \
+ product_id), \
+ .delay = _delay, \
+ .override_edid_mode = _mode \
+}
+
/*
* This table is used to figure out power sequencing delays for panels that
* are detected by EDID. Entries here may point to entries in the
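
Editor's note: EDP_PANEL_ENTRY2 is simply the existing table macro with one extra designated initializer, so entries that need no override keep using EDP_PANEL_ENTRY and leave the new field NULL. A compact illustration of that optional-field table pattern; all names below are invented:

#include <stddef.h>
#include <stdio.h>

struct mode {
	int w, h, refresh;
};

struct panel_entry {
	const char *name;
	unsigned int id;
	const struct mode *override_mode;	/* NULL: trust the EDID */
};

#define PANEL_ENTRY(_id, _name)		\
	{ .name = _name, .id = _id }

#define PANEL_ENTRY2(_id, _name, _mode)	\
	{ .name = _name, .id = _id, .override_mode = _mode }

static const struct mode mode_1080p = { 1920, 1080, 60 };

static const struct panel_entry panels[] = {
	PANEL_ENTRY(0x1234, "plain panel"),
	PANEL_ENTRY2(0x5678, "quirky panel", &mode_1080p),
};

int main(void)
{
	for (size_t i = 0; i < sizeof(panels) / sizeof(panels[0]); i++) {
		const struct panel_entry *p = &panels[i];

		if (p->override_mode)
			printf("%s: force %dx%d@%d\n", p->name,
			       p->override_mode->w, p->override_mode->h,
			       p->override_mode->refresh);
		else
			printf("%s: use EDID modes\n", p->name);
	}
	return 0;
}
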
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 5fc55b977..6806779f8 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -1252,9 +1252,26 @@ static int host1x_drm_probe(struct host1x_device *dev)
drm_mode_config_reset(drm);
- err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
- if (err < 0)
- goto hub;
+ /*
+ * Only take over from a potential firmware framebuffer if any CRTCs
+ * have been registered. This must not be a fatal error because there
+ * are other accelerators that are exposed via this driver.
+ *
+ * Another case where this happens is on Tegra234 where the display
+ * hardware is no longer part of the host1x complex, so this driver
+ * will not expose any modesetting features.
+ */
+ if (drm->mode_config.num_crtc > 0) {
+ err = drm_aperture_remove_framebuffers(&tegra_drm_driver);
+ if (err < 0)
+ goto hub;
+ } else {
+ /*
+ * Indicate to userspace that this doesn't expose any display
+ * capabilities.
+ */
+ drm->driver_features &= ~(DRIVER_MODESET | DRIVER_ATOMIC);
+ }
err = tegra_drm_fb_init(drm);
if (err < 0)
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index 86affe987..393b97b4a 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -383,7 +383,7 @@ static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
enum ttm_caching caching,
pgoff_t start_page, pgoff_t end_page)
{
- struct page **pages = tt->pages;
+ struct page **pages = &tt->pages[start_page];
unsigned int order;
pgoff_t i, nr;
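
Editor's note: the fix makes the walk start at &tt->pages[start_page] rather than the beginning of the array, so the pages actually released match the requested range [start_page, end_page). A small standalone illustration of iterating a sub-range through an offset pointer (toy data, not TTM code):

#include <stddef.h>
#include <stdio.h>

static void free_range(int *items, size_t start, size_t end)
{
	/* Point at the first element of the range, not at items[0]. */
	int *p = &items[start];
	size_t nr = end - start;

	for (size_t i = 0; i < nr; i++)
		printf("releasing item %d\n", p[i]);
}

int main(void)
{
	int items[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

	free_range(items, 2, 5);	/* items 2, 3 and 4 */
	return 0;
}
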
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 0035affc3..9b2d23516 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -93,6 +93,7 @@ static int virtio_gpu_probe(struct virtio_device *vdev)
goto err_free;
}
+ dma_set_max_seg_size(dev->dev, dma_max_mapping_size(dev->dev) ?: UINT_MAX);
ret = virtio_gpu_init(vdev, dev);
if (ret)
goto err_free;
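
Editor's note: the added dma_set_max_seg_size() call uses the GNU "x ?: y" shorthand, which evaluates to x when x is non-zero and to y otherwise, here falling back to UINT_MAX when dma_max_mapping_size() reports 0. A tiny demonstration of that extension (GCC/Clang only; the helper below is a stand-in, not a DMA API call):

#include <limits.h>
#include <stdio.h>

static unsigned int reported_limit(void)
{
	return 0;	/* pretend the lower layer reports "no limit known" */
}

int main(void)
{
	/* GNU extension: a ?: b  ==  a ? a : b, with a evaluated once. */
	unsigned int seg = reported_limit() ?: UINT_MAX;

	printf("max segment size: %u\n", seg);
	return 0;
}
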