author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-08-07 13:18:06 +0000
---|---|---
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-08-07 13:18:06 +0000
commit | 638a9e433ecd61e64761352dbec1fa4f5874c941 (patch)
tree | fdbff74a238d7a5a7d1cef071b7230bc064b9f25 | /drivers/gpu/drm/xe/display
parent | Releasing progress-linux version 6.9.12-1~progress7.99u1. (diff)
download | linux-638a9e433ecd61e64761352dbec1fa4f5874c941.tar.xz linux-638a9e433ecd61e64761352dbec1fa4f5874c941.zip
Merging upstream version 6.10.3.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/xe/display')
-rw-r--r-- | drivers/gpu/drm/xe/display/intel_fb_bo.c | 8
-rw-r--r-- | drivers/gpu/drm/xe/display/intel_fbdev_fb.c | 16
-rw-r--r-- | drivers/gpu/drm/xe/display/xe_display.c | 19
-rw-r--r-- | drivers/gpu/drm/xe/display/xe_dsb_buffer.c | 4
-rw-r--r-- | drivers/gpu/drm/xe/display/xe_fb_pin.c | 39
-rw-r--r-- | drivers/gpu/drm/xe/display/xe_hdcp_gsc.c | 244
-rw-r--r-- | drivers/gpu/drm/xe/display/xe_plane_initial.c | 7
7 files changed, 273 insertions, 64 deletions
diff --git a/drivers/gpu/drm/xe/display/intel_fb_bo.c b/drivers/gpu/drm/xe/display/intel_fb_bo.c
index a9c1f9885c..e18521acc5 100644
--- a/drivers/gpu/drm/xe/display/intel_fb_bo.c
+++ b/drivers/gpu/drm/xe/display/intel_fb_bo.c
@@ -11,7 +11,7 @@ void intel_fb_bo_framebuffer_fini(struct xe_bo *bo)
 {
-	if (bo->flags & XE_BO_CREATE_PINNED_BIT) {
+	if (bo->flags & XE_BO_FLAG_PINNED) {
 		/* Unpin our kernel fb first */
 		xe_bo_lock(bo, false);
 		xe_bo_unpin(bo);
@@ -33,9 +33,9 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
 	if (ret)
 		goto err;
 
-	if (!(bo->flags & XE_BO_SCANOUT_BIT)) {
+	if (!(bo->flags & XE_BO_FLAG_SCANOUT)) {
 		/*
-		 * XE_BO_SCANOUT_BIT should ideally be set at creation, or is
+		 * XE_BO_FLAG_SCANOUT should ideally be set at creation, or is
 		 * automatically set when creating FB. We cannot change caching
 		 * mode when the object is VM_BINDed, so we can only set
 		 * coherency with display when unbound.
@@ -45,7 +45,7 @@ int intel_fb_bo_framebuffer_init(struct intel_framebuffer *intel_fb,
 			ret = -EINVAL;
 			goto err;
 		}
-		bo->flags |= XE_BO_SCANOUT_BIT;
+		bo->flags |= XE_BO_FLAG_SCANOUT;
 	}
 	ttm_bo_unreserve(&bo->ttm);
 	return 0;
diff --git a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
index 51ae3561fd..9e4bcfdbc7 100644
--- a/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
+++ b/drivers/gpu/drm/xe/display/intel_fbdev_fb.c
@@ -42,9 +42,9 @@ struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
 	if (!IS_DGFX(dev_priv)) {
 		obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv),
					   NULL, size,
-					   ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
-					   XE_BO_CREATE_STOLEN_BIT |
-					   XE_BO_CREATE_PINNED_BIT);
+					   ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+					   XE_BO_FLAG_STOLEN |
+					   XE_BO_FLAG_PINNED);
 		if (!IS_ERR(obj))
 			drm_info(&dev_priv->drm, "Allocated fbdev into stolen\n");
 		else
@@ -52,9 +52,9 @@ struct drm_framebuffer *intel_fbdev_fb_alloc(struct drm_fb_helper *helper,
 	}
 	if (IS_ERR(obj)) {
 		obj = xe_bo_create_pin_map(dev_priv, xe_device_get_root_tile(dev_priv), NULL, size,
-					   ttm_bo_type_kernel, XE_BO_SCANOUT_BIT |
-					   XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) |
-					   XE_BO_CREATE_PINNED_BIT);
+					   ttm_bo_type_kernel, XE_BO_FLAG_SCANOUT |
+					   XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(dev_priv)) |
+					   XE_BO_FLAG_PINNED);
 	}
 
 	if (IS_ERR(obj)) {
@@ -81,8 +81,8 @@ int intel_fbdev_fb_fill_info(struct drm_i915_private *i915, struct fb_info *info
 {
 	struct pci_dev *pdev = to_pci_dev(i915->drm.dev);
 
-	if (!(obj->flags & XE_BO_CREATE_SYSTEM_BIT)) {
-		if (obj->flags & XE_BO_CREATE_STOLEN_BIT)
+	if (!(obj->flags & XE_BO_FLAG_SYSTEM)) {
+		if (obj->flags & XE_BO_FLAG_STOLEN)
 			info->fix.smem_start = xe_ttm_stolen_io_offset(obj, 0);
 		else
 			info->fix.smem_start =
diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c
index 6ec375c1c4..0de0566e5b 100644
--- a/drivers/gpu/drm/xe/display/xe_display.c
+++ b/drivers/gpu/drm/xe/display/xe_display.c
@@ -51,14 +51,6 @@ bool xe_display_driver_probe_defer(struct pci_dev *pdev)
 	return intel_display_driver_probe_defer(pdev);
 }
 
-static void xe_display_last_close(struct drm_device *dev)
-{
-	struct xe_device *xe = to_xe_device(dev);
-
-	if (xe->info.enable_display)
-		intel_fbdev_restore_mode(to_xe_device(dev));
-}
-
 /**
  * xe_display_driver_set_hooks - Add driver flags and hooks for display
  * @driver: DRM device driver
@@ -73,7 +65,6 @@ void xe_display_driver_set_hooks(struct drm_driver *driver)
 		return;
 
 	driver->driver_features |= DRIVER_MODESET | DRIVER_ATOMIC;
-	driver->lastclose = xe_display_last_close;
 }
 
 static void unset_display_features(struct xe_device *xe)
@@ -101,8 +92,6 @@ static void display_destroy(struct drm_device *dev, void *dummy)
  */
 int xe_display_create(struct xe_device *xe)
 {
-	int err;
-
 	spin_lock_init(&xe->display.fb_tracking.lock);
 
 	xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
@@ -110,11 +99,7 @@ int xe_display_create(struct xe_device *xe)
 	drmm_mutex_init(&xe->drm, &xe->sb_lock);
 	xe->enabled_irq_mask = ~0;
 
-	err = drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
-	if (err)
-		return err;
-
-	return 0;
+	return drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
 }
 
 static void xe_display_fini_nommio(struct drm_device *dev, void *dummy)
@@ -218,9 +203,7 @@ void xe_display_fini(struct xe_device *xe)
 	if (!xe->info.enable_display)
 		return;
 
-	/* poll work can call into fbdev, hence clean that up afterwards */
 	intel_hpd_poll_fini(xe);
-	intel_fbdev_fini(xe);
 
 	intel_hdcp_component_fini(xe);
 	intel_audio_deinit(xe);
diff --git a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
index 27c2fb1c00..44c9fd2143 100644
--- a/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
+++ b/drivers/gpu/drm/xe/display/xe_dsb_buffer.c
@@ -45,8 +45,8 @@ bool intel_dsb_buffer_create(struct intel_crtc *crtc, struct intel_dsb_buffer *d
 	obj = xe_bo_create_pin_map(i915, xe_device_get_root_tile(i915),
				   NULL, PAGE_ALIGN(size),
				   ttm_bo_type_kernel,
-				   XE_BO_CREATE_VRAM_IF_DGFX(xe_device_get_root_tile(i915)) |
-				   XE_BO_CREATE_GGTT_BIT);
+				   XE_BO_FLAG_VRAM_IF_DGFX(xe_device_get_root_tile(i915)) |
+				   XE_BO_FLAG_GGTT);
 	if (IS_ERR(obj)) {
 		kfree(vma);
 		return false;
diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c
index 722c84a566..3e1ae37c4c 100644
--- a/drivers/gpu/drm/xe/display/xe_fb_pin.c
+++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c
@@ -10,6 +10,7 @@
 #include "intel_fb_pin.h"
 #include "xe_ggtt.h"
 #include "xe_gt.h"
+#include "xe_pm.h"
 
 #include <drm/ttm/ttm_bo.h>
 
@@ -30,7 +31,7 @@ write_dpt_rotated(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs, u32 bo_
 		for (row = 0; row < height; row++) {
 			u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
-							      xe->pat.idx[XE_CACHE_WB]);
+							      xe->pat.idx[XE_CACHE_NONE]);
 
 			iosys_map_wr(map, *dpt_ofs, u64, pte);
 			*dpt_ofs += 8;
@@ -62,7 +63,7 @@ write_dpt_remapped(struct xe_bo *bo, struct iosys_map *map, u32 *dpt_ofs,
 		for (column = 0; column < width; column++) {
 			iosys_map_wr(map, *dpt_ofs, u64,
				     pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
-				     xe->pat.idx[XE_CACHE_WB]));
+				     xe->pat.idx[XE_CACHE_NONE]));
 
 			*dpt_ofs += 8;
 			src_idx++;
@@ -99,18 +100,21 @@ static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
 	if (IS_DGFX(xe))
 		dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
					   ttm_bo_type_kernel,
-					   XE_BO_CREATE_VRAM0_BIT |
-					   XE_BO_CREATE_GGTT_BIT);
+					   XE_BO_FLAG_VRAM0 |
+					   XE_BO_FLAG_GGTT |
+					   XE_BO_FLAG_PAGETABLE);
 	else
 		dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
					   ttm_bo_type_kernel,
-					   XE_BO_CREATE_STOLEN_BIT |
-					   XE_BO_CREATE_GGTT_BIT);
+					   XE_BO_FLAG_STOLEN |
+					   XE_BO_FLAG_GGTT |
+					   XE_BO_FLAG_PAGETABLE);
 	if (IS_ERR(dpt))
 		dpt = xe_bo_create_pin_map(xe, tile0, NULL, dpt_size,
					   ttm_bo_type_kernel,
-					   XE_BO_CREATE_SYSTEM_BIT |
-					   XE_BO_CREATE_GGTT_BIT);
+					   XE_BO_FLAG_SYSTEM |
+					   XE_BO_FLAG_GGTT |
+					   XE_BO_FLAG_PAGETABLE);
 	if (IS_ERR(dpt))
 		return PTR_ERR(dpt);
 
@@ -119,7 +123,7 @@ static int __xe_pin_fb_vma_dpt(struct intel_framebuffer *fb,
 		for (x = 0; x < size / XE_PAGE_SIZE; x++) {
 			u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x * XE_PAGE_SIZE,
-							      xe->pat.idx[XE_CACHE_WB]);
+							      xe->pat.idx[XE_CACHE_NONE]);
 
 			iosys_map_wr(&dpt->vmap, x * 8, u64, pte);
 		}
@@ -165,7 +169,7 @@ write_ggtt_rotated(struct xe_bo *bo, struct xe_ggtt *ggtt, u32 *ggtt_ofs, u32 bo
 		for (row = 0; row < height; row++) {
 			u64 pte = ggtt->pt_ops->pte_encode_bo(bo, src_idx * XE_PAGE_SIZE,
-							      xe->pat.idx[XE_CACHE_WB]);
+							      xe->pat.idx[XE_CACHE_NONE]);
 
 			xe_ggtt_set_pte(ggtt, *ggtt_ofs, pte);
 			*ggtt_ofs += XE_PAGE_SIZE;
@@ -190,7 +194,7 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
 	/* TODO: Consider sharing framebuffer mapping?
	 * embed i915_vma inside intel_framebuffer
	 */
-	xe_device_mem_access_get(tile_to_xe(ggtt->tile));
+	xe_pm_runtime_get_noresume(tile_to_xe(ggtt->tile));
 	ret = mutex_lock_interruptible(&ggtt->lock);
 	if (ret)
 		goto out;
@@ -211,7 +215,7 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
 		for (x = 0; x < size; x += XE_PAGE_SIZE) {
 			u64 pte = ggtt->pt_ops->pte_encode_bo(bo, x,
-							      xe->pat.idx[XE_CACHE_WB]);
+							      xe->pat.idx[XE_CACHE_NONE]);
 
 			xe_ggtt_set_pte(ggtt, vma->node.start + x, pte);
 		}
@@ -238,11 +242,10 @@ static int __xe_pin_fb_vma_ggtt(struct intel_framebuffer *fb,
						  rot_info->plane[i].dst_stride);
 	}
 
-	xe_ggtt_invalidate(ggtt);
 out_unlock:
 	mutex_unlock(&ggtt->lock);
 out:
-	xe_device_mem_access_put(tile_to_xe(ggtt->tile));
+	xe_pm_runtime_put(tile_to_xe(ggtt->tile));
 	return ret;
 }
 
@@ -260,7 +263,7 @@ static struct i915_vma *__xe_pin_fb_vma(struct intel_framebuffer *fb,
 
 	if (IS_DGFX(to_xe_device(bo->ttm.base.dev)) &&
	    intel_fb_rc_ccs_cc_plane(&fb->base) >= 0 &&
-	    !(bo->flags & XE_BO_NEEDS_CPU_ACCESS)) {
+	    !(bo->flags & XE_BO_FLAG_NEEDS_CPU_ACCESS)) {
 		struct xe_tile *tile = xe_device_get_root_tile(xe);
 
 		/*
@@ -321,7 +324,7 @@ static void __xe_unpin_fb_vma(struct i915_vma *vma)
 		xe_bo_unpin_map_no_vm(vma->dpt);
 	else if (!drm_mm_node_allocated(&vma->bo->ggtt_node) ||
		 vma->bo->ggtt_node.start != vma->node.start)
-		xe_ggtt_remove_node(ggtt, &vma->node);
+		xe_ggtt_remove_node(ggtt, &vma->node, false);
 
 	ttm_bo_reserve(&vma->bo->ttm, false, false, NULL);
 	ttm_bo_unpin(&vma->bo->ttm);
@@ -353,7 +356,7 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state)
 	struct i915_vma *vma;
 
 	/* We reject creating !SCANOUT fb's, so this is weird.. */
-	drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_SCANOUT_BIT));
+	drm_WARN_ON(bo->ttm.base.dev, !(bo->flags & XE_BO_FLAG_SCANOUT));
 
 	vma = __xe_pin_fb_vma(to_intel_framebuffer(fb), &plane_state->view.gtt);
 	if (IS_ERR(vma))
@@ -381,4 +384,4 @@ struct i915_address_space *intel_dpt_create(struct intel_framebuffer *fb)
 void intel_dpt_destroy(struct i915_address_space *vm)
 {
 	return;
-}
\ No newline at end of file
+}
diff --git a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
index 0f11a39333..b3d3c065dd 100644
--- a/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
+++ b/drivers/gpu/drm/xe/display/xe_hdcp_gsc.c
@@ -3,32 +3,254 @@
  * Copyright 2023, Intel Corporation.
  */
 
-#include "i915_drv.h"
+#include <drm/drm_print.h>
+#include <drm/i915_hdcp_interface.h>
+#include <linux/delay.h>
+
+#include "abi/gsc_command_header_abi.h"
 #include "intel_hdcp_gsc.h"
+#include "intel_hdcp_gsc_message.h"
+#include "xe_bo.h"
+#include "xe_device.h"
+#include "xe_device_types.h"
+#include "xe_gsc_proxy.h"
+#include "xe_gsc_submit.h"
+#include "xe_gt.h"
+#include "xe_map.h"
+#include "xe_pm.h"
+#include "xe_uc_fw.h"
+
+#define HECI_MEADDRESS_HDCP 18
+
+struct intel_hdcp_gsc_message {
+	struct xe_bo *hdcp_bo;
+	u64 hdcp_cmd_in;
+	u64 hdcp_cmd_out;
+};
 
-bool intel_hdcp_gsc_cs_required(struct drm_i915_private *i915)
+#define HDCP_GSC_HEADER_SIZE sizeof(struct intel_gsc_mtl_header)
+
+bool intel_hdcp_gsc_cs_required(struct xe_device *xe)
 {
-	return true;
+	return DISPLAY_VER(xe) >= 14;
 }
 
-bool intel_hdcp_gsc_check_status(struct drm_i915_private *i915)
+bool intel_hdcp_gsc_check_status(struct xe_device *xe)
 {
-	return false;
+	struct xe_tile *tile = xe_device_get_root_tile(xe);
+	struct xe_gt *gt = tile->media_gt;
+	bool ret = true;
+
+	if (!xe_uc_fw_is_enabled(&gt->uc.gsc.fw))
+		return false;
+
+	xe_pm_runtime_get(xe);
+	if (xe_force_wake_get(gt_to_fw(gt), XE_FW_GSC)) {
+		drm_dbg_kms(&xe->drm,
+			    "failed to get forcewake to check proxy status\n");
+		ret = false;
+		goto out;
+	}
+
+	if (!xe_gsc_proxy_init_done(&gt->uc.gsc))
+		ret = false;
+
+	xe_force_wake_put(gt_to_fw(gt), XE_FW_GSC);
+out:
+	xe_pm_runtime_put(xe);
+	return ret;
 }
 
-int intel_hdcp_gsc_init(struct drm_i915_private *i915)
+/*This function helps allocate memory for the command that we will send to gsc cs */
+static int intel_hdcp_gsc_initialize_message(struct xe_device *xe,
+					     struct intel_hdcp_gsc_message *hdcp_message)
 {
-	drm_info(&i915->drm, "HDCP support not yet implemented\n");
-	return -ENODEV;
+	struct xe_bo *bo = NULL;
+	u64 cmd_in, cmd_out;
+	int ret = 0;
+
+	/* allocate object of two page for HDCP command memory and store it */
+	bo = xe_bo_create_pin_map(xe, xe_device_get_root_tile(xe), NULL, PAGE_SIZE * 2,
+				  ttm_bo_type_kernel,
+				  XE_BO_FLAG_SYSTEM |
+				  XE_BO_FLAG_GGTT);
+
+	if (IS_ERR(bo)) {
+		drm_err(&xe->drm, "Failed to allocate bo for HDCP streaming command!\n");
+		ret = PTR_ERR(bo);
+		goto out;
+	}
+
+	cmd_in = xe_bo_ggtt_addr(bo);
+	cmd_out = cmd_in + PAGE_SIZE;
+	xe_map_memset(xe, &bo->vmap, 0, 0, bo->size);
+
+	hdcp_message->hdcp_bo = bo;
+	hdcp_message->hdcp_cmd_in = cmd_in;
+	hdcp_message->hdcp_cmd_out = cmd_out;
+out:
+	return ret;
 }
 
-void intel_hdcp_gsc_fini(struct drm_i915_private *i915)
+static int intel_hdcp_gsc_hdcp2_init(struct xe_device *xe)
 {
+	struct intel_hdcp_gsc_message *hdcp_message;
+	int ret;
+
+	hdcp_message = kzalloc(sizeof(*hdcp_message), GFP_KERNEL);
+
+	if (!hdcp_message)
+		return -ENOMEM;
+
+	/*
+	 * NOTE: No need to lock the comp mutex here as it is already
+	 * going to be taken before this function called
+	 */
+	ret = intel_hdcp_gsc_initialize_message(xe, hdcp_message);
+	if (ret) {
+		drm_err(&xe->drm, "Could not initialize hdcp_message\n");
+		kfree(hdcp_message);
+		return ret;
+	}
+
+	xe->display.hdcp.hdcp_message = hdcp_message;
+	return ret;
 }
 
-ssize_t intel_hdcp_gsc_msg_send(struct drm_i915_private *i915, u8 *msg_in,
+static const struct i915_hdcp_ops gsc_hdcp_ops = {
+	.initiate_hdcp2_session = intel_hdcp_gsc_initiate_session,
+	.verify_receiver_cert_prepare_km =
+				intel_hdcp_gsc_verify_receiver_cert_prepare_km,
+	.verify_hprime = intel_hdcp_gsc_verify_hprime,
+	.store_pairing_info = intel_hdcp_gsc_store_pairing_info,
+	.initiate_locality_check = intel_hdcp_gsc_initiate_locality_check,
+	.verify_lprime = intel_hdcp_gsc_verify_lprime,
+	.get_session_key = intel_hdcp_gsc_get_session_key,
+	.repeater_check_flow_prepare_ack =
+				intel_hdcp_gsc_repeater_check_flow_prepare_ack,
+	.verify_mprime = intel_hdcp_gsc_verify_mprime,
+	.enable_hdcp_authentication = intel_hdcp_gsc_enable_authentication,
+	.close_hdcp_session = intel_hdcp_gsc_close_session,
+};
+
+int intel_hdcp_gsc_init(struct xe_device *xe)
+{
+	struct i915_hdcp_arbiter *data;
+	int ret;
+
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		return -ENOMEM;
+
+	mutex_lock(&xe->display.hdcp.hdcp_mutex);
+	xe->display.hdcp.arbiter = data;
+	xe->display.hdcp.arbiter->hdcp_dev = xe->drm.dev;
+	xe->display.hdcp.arbiter->ops = &gsc_hdcp_ops;
+	ret = intel_hdcp_gsc_hdcp2_init(xe);
+	if (ret)
+		kfree(data);
+
+	mutex_unlock(&xe->display.hdcp.hdcp_mutex);
+
+	return ret;
+}
+
+void intel_hdcp_gsc_fini(struct xe_device *xe)
+{
+	struct intel_hdcp_gsc_message *hdcp_message =
+					xe->display.hdcp.hdcp_message;
+	struct i915_hdcp_arbiter *arb = xe->display.hdcp.arbiter;
+
+	if (hdcp_message) {
+		xe_bo_unpin_map_no_vm(hdcp_message->hdcp_bo);
+		kfree(hdcp_message);
+		xe->display.hdcp.hdcp_message = NULL;
+	}
+
+	kfree(arb);
+	xe->display.hdcp.arbiter = NULL;
+}
+
+static int xe_gsc_send_sync(struct xe_device *xe,
+			    struct intel_hdcp_gsc_message *hdcp_message,
+			    u32 msg_size_in, u32 msg_size_out,
+			    u32 addr_out_off)
+{
+	struct xe_gt *gt = hdcp_message->hdcp_bo->tile->media_gt;
+	struct iosys_map *map = &hdcp_message->hdcp_bo->vmap;
+	struct xe_gsc *gsc = &gt->uc.gsc;
+	int ret;
+
+	ret = xe_gsc_pkt_submit_kernel(gsc, hdcp_message->hdcp_cmd_in, msg_size_in,
+				       hdcp_message->hdcp_cmd_out, msg_size_out);
+	if (ret) {
+		drm_err(&xe->drm, "failed to send gsc HDCP msg (%d)\n", ret);
+		return ret;
+	}
+
+	if (xe_gsc_check_and_update_pending(xe, map, 0, map, addr_out_off))
+		return -EAGAIN;
+
+	ret = xe_gsc_read_out_header(xe, map, addr_out_off,
+				     sizeof(struct hdcp_cmd_header), NULL);
+
+	return ret;
+}
+
+ssize_t intel_hdcp_gsc_msg_send(struct xe_device *xe, u8 *msg_in,
 				size_t msg_in_len, u8 *msg_out,
 				size_t msg_out_len)
 {
-	return -ENODEV;
+	const size_t max_msg_size = PAGE_SIZE - HDCP_GSC_HEADER_SIZE;
+	struct intel_hdcp_gsc_message *hdcp_message;
+	u64 host_session_id;
+	u32 msg_size_in, msg_size_out;
+	u32 addr_out_off, addr_in_wr_off = 0;
+	int ret, tries = 0;
+
+	if (msg_in_len > max_msg_size || msg_out_len > max_msg_size) {
+		ret = -ENOSPC;
+		goto out;
+	}
+
+	msg_size_in = msg_in_len + HDCP_GSC_HEADER_SIZE;
+	msg_size_out = msg_out_len + HDCP_GSC_HEADER_SIZE;
+	hdcp_message = xe->display.hdcp.hdcp_message;
+	addr_out_off = PAGE_SIZE;
+
+	host_session_id = xe_gsc_create_host_session_id();
+	xe_pm_runtime_get_noresume(xe);
+	addr_in_wr_off = xe_gsc_emit_header(xe, &hdcp_message->hdcp_bo->vmap,
+					    addr_in_wr_off, HECI_MEADDRESS_HDCP,
+					    host_session_id, msg_in_len);
+	xe_map_memcpy_to(xe, &hdcp_message->hdcp_bo->vmap, addr_in_wr_off,
+			 msg_in, msg_in_len);
+	/*
+	 * Keep sending request in case the pending bit is set no need to add
+	 * message handle as we are using same address hence loc. of header is
+	 * same and it will contain the message handle. we will send the message
+	 * 20 times each message 50 ms apart
+	 */
+	do {
+		ret = xe_gsc_send_sync(xe, hdcp_message, msg_size_in, msg_size_out,
+				       addr_out_off);
+
+		/* Only try again if gsc says so */
+		if (ret != -EAGAIN)
+			break;
+
+		msleep(50);
+
+	} while (++tries < 20);
+
+	if (ret)
+		goto out;
+
+	xe_map_memcpy_from(xe, msg_out, &hdcp_message->hdcp_bo->vmap,
+			   addr_out_off + HDCP_GSC_HEADER_SIZE,
+			   msg_out_len);
+out:
+	xe_pm_runtime_put(xe);
+	return ret;
 }
diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c
index 866d1dd6ee..9693c56d38 100644
--- a/drivers/gpu/drm/xe/display/xe_plane_initial.c
+++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c
@@ -6,6 +6,7 @@
 /* for ioread64 */
 #include <linux/io-64-nonatomic-lo-hi.h>
 
+#include "regs/xe_gtt_defs.h"
 #include "xe_ggtt.h"
 
 #include "i915_drv.h"
@@ -62,7 +63,7 @@ initial_plane_bo(struct xe_device *xe,
 	if (plane_config->size == 0)
 		return NULL;
 
-	flags = XE_BO_CREATE_PINNED_BIT | XE_BO_SCANOUT_BIT | XE_BO_CREATE_GGTT_BIT;
+	flags = XE_BO_FLAG_PINNED | XE_BO_FLAG_SCANOUT | XE_BO_FLAG_GGTT;
 
 	base = round_down(plane_config->base, page_size);
 	if (IS_DGFX(xe)) {
@@ -79,7 +80,7 @@ initial_plane_bo(struct xe_device *xe,
 		}
 
 		phys_base = pte & ~(page_size - 1);
-		flags |= XE_BO_CREATE_VRAM0_BIT;
+		flags |= XE_BO_FLAG_VRAM0;
 
 		/*
		 * We don't currently expect this to ever be placed in the
@@ -101,7 +102,7 @@ initial_plane_bo(struct xe_device *xe,
 		if (!stolen)
 			return NULL;
 		phys_base = base;
-		flags |= XE_BO_CREATE_STOLEN_BIT;
+		flags |= XE_BO_FLAG_STOLEN;
 
 		/*
		 * If the FB is too big, just don't use it since fbdev is not very