author: Daniel Baumann <daniel.baumann@progress-linux.org> (2024-05-18 18:50:03 +0000)
committer: Daniel Baumann <daniel.baumann@progress-linux.org> (2024-05-18 18:50:03 +0000)
commit: 01a69402cf9d38ff180345d55c2ee51c7e89fbc7 (patch)
tree: b406c5242a088c4f59c6e4b719b783f43aca6ae9 /drivers/gpu/drm/xe/tests
parent: Adding upstream version 6.7.12. (diff)
Adding upstream version 6.8.9. (upstream/6.8.9)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/xe/tests')
20 files changed, 2267 insertions, 0 deletions
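The new tests/Makefile (first hunk below) builds every suite only when CONFIG_DRM_XE_KUNIT_TEST is set. As a minimal sketch of how the suites might be enabled — only CONFIG_DRM_XE_KUNIT_TEST itself appears in this patch, so the surrounding dependency list is an assumption — a config fragment like the following builds them as loadable modules:

```
# Assumed .config fragment: CONFIG_DRM_XE_KUNIT_TEST comes from the
# Makefile in this patch; the other symbols are standard kernel/KUnit
# options, but the exact dependency set is not shown here.
CONFIG_KUNIT=m
CONFIG_DRM_XE=m
CONFIG_DRM_XE_KUNIT_TEST=m
```

The live suites (xe_bo, xe_dma_buf, xe_migrate, xe_mocs) iterate over bound devices through xe_call_for_each_device() and skip themselves with "test runs only on hardware" when no device is found, so they are intended to be loaded as modules on a machine with an xe GPU rather than run under the generic UML-based KUnit runner.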
diff --git a/drivers/gpu/drm/xe/tests/Makefile b/drivers/gpu/drm/xe/tests/Makefile new file mode 100644 index 0000000000..39d8a08922 --- /dev/null +++ b/drivers/gpu/drm/xe/tests/Makefile @@ -0,0 +1,10 @@ +# SPDX-License-Identifier: GPL-2.0 + +obj-$(CONFIG_DRM_XE_KUNIT_TEST) += \ + xe_bo_test.o \ + xe_dma_buf_test.o \ + xe_migrate_test.o \ + xe_mocs_test.o \ + xe_pci_test.o \ + xe_rtp_test.o \ + xe_wa_test.o diff --git a/drivers/gpu/drm/xe/tests/xe_bo.c b/drivers/gpu/drm/xe/tests/xe_bo.c new file mode 100644 index 0000000000..3436fd9cf2 --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_bo.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0 AND MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include <kunit/test.h> +#include <kunit/visibility.h> + +#include "tests/xe_bo_test.h" +#include "tests/xe_pci_test.h" +#include "tests/xe_test.h" + +#include "xe_bo_evict.h" +#include "xe_pci.h" +#include "xe_pm.h" + +static int ccs_test_migrate(struct xe_tile *tile, struct xe_bo *bo, + bool clear, u64 get_val, u64 assign_val, + struct kunit *test) +{ + struct dma_fence *fence; + struct ttm_tt *ttm; + struct page *page; + pgoff_t ccs_page; + long timeout; + u64 *cpu_map; + int ret; + u32 offset; + + /* Move bo to VRAM if not already there. */ + ret = xe_bo_validate(bo, NULL, false); + if (ret) { + KUNIT_FAIL(test, "Failed to validate bo.\n"); + return ret; + } + + /* Optionally clear bo *and* CCS data in VRAM. */ + if (clear) { + fence = xe_migrate_clear(tile->migrate, bo, bo->ttm.resource); + if (IS_ERR(fence)) { + KUNIT_FAIL(test, "Failed to submit bo clear.\n"); + return PTR_ERR(fence); + } + dma_fence_put(fence); + } + + /* Evict to system. CCS data should be copied. */ + ret = xe_bo_evict(bo, true); + if (ret) { + KUNIT_FAIL(test, "Failed to evict bo.\n"); + return ret; + } + + /* Sync all migration blits */ + timeout = dma_resv_wait_timeout(bo->ttm.base.resv, + DMA_RESV_USAGE_KERNEL, + true, + 5 * HZ); + if (timeout <= 0) { + KUNIT_FAIL(test, "Failed to sync bo eviction.\n"); + return -ETIME; + } + + /* + * Bo with CCS data is now in system memory. Verify backing store + * and data integrity. Then assign for the next testing round while + * we still have a CPU map. + */ + ttm = bo->ttm.ttm; + if (!ttm || !ttm_tt_is_populated(ttm)) { + KUNIT_FAIL(test, "Bo was not in expected placement.\n"); + return -EINVAL; + } + + ccs_page = xe_bo_ccs_pages_start(bo) >> PAGE_SHIFT; + if (ccs_page >= ttm->num_pages) { + KUNIT_FAIL(test, "No TTM CCS pages present.\n"); + return -EINVAL; + } + + page = ttm->pages[ccs_page]; + cpu_map = kmap_local_page(page); + + /* Check first CCS value */ + if (cpu_map[0] != get_val) { + KUNIT_FAIL(test, + "Expected CCS readout 0x%016llx, got 0x%016llx.\n", + (unsigned long long)get_val, + (unsigned long long)cpu_map[0]); + ret = -EINVAL; + } + + /* Check last CCS value, or at least last value in page. 
*/ + offset = xe_device_ccs_bytes(tile_to_xe(tile), bo->size); + offset = min_t(u32, offset, PAGE_SIZE) / sizeof(u64) - 1; + if (cpu_map[offset] != get_val) { + KUNIT_FAIL(test, + "Expected CCS readout 0x%016llx, got 0x%016llx.\n", + (unsigned long long)get_val, + (unsigned long long)cpu_map[offset]); + ret = -EINVAL; + } + + cpu_map[0] = assign_val; + cpu_map[offset] = assign_val; + kunmap_local(cpu_map); + + return ret; +} + +static void ccs_test_run_tile(struct xe_device *xe, struct xe_tile *tile, + struct kunit *test) +{ + struct xe_bo *bo; + + int ret; + + /* TODO: Sanity check */ + unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile); + + if (IS_DGFX(xe)) + kunit_info(test, "Testing vram id %u\n", tile->id); + else + kunit_info(test, "Testing system memory\n"); + + bo = xe_bo_create_user(xe, NULL, NULL, SZ_1M, DRM_XE_GEM_CPU_CACHING_WC, + ttm_bo_type_device, bo_flags); + if (IS_ERR(bo)) { + KUNIT_FAIL(test, "Failed to create bo.\n"); + return; + } + + xe_bo_lock(bo, false); + + kunit_info(test, "Verifying that CCS data is cleared on creation.\n"); + ret = ccs_test_migrate(tile, bo, false, 0ULL, 0xdeadbeefdeadbeefULL, + test); + if (ret) + goto out_unlock; + + kunit_info(test, "Verifying that CCS data survives migration.\n"); + ret = ccs_test_migrate(tile, bo, false, 0xdeadbeefdeadbeefULL, + 0xdeadbeefdeadbeefULL, test); + if (ret) + goto out_unlock; + + kunit_info(test, "Verifying that CCS data can be properly cleared.\n"); + ret = ccs_test_migrate(tile, bo, true, 0ULL, 0ULL, test); + +out_unlock: + xe_bo_unlock(bo); + xe_bo_put(bo); +} + +static int ccs_test_run_device(struct xe_device *xe) +{ + struct kunit *test = xe_cur_kunit(); + struct xe_tile *tile; + int id; + + if (!xe_device_has_flat_ccs(xe)) { + kunit_info(test, "Skipping non-flat-ccs device.\n"); + return 0; + } + + xe_device_mem_access_get(xe); + + for_each_tile(tile, xe, id) { + /* For igfx run only for primary tile */ + if (!IS_DGFX(xe) && id > 0) + continue; + ccs_test_run_tile(xe, tile, test); + } + + xe_device_mem_access_put(xe); + + return 0; +} + +void xe_ccs_migrate_kunit(struct kunit *test) +{ + xe_call_for_each_device(ccs_test_run_device); +} +EXPORT_SYMBOL_IF_KUNIT(xe_ccs_migrate_kunit); + +static int evict_test_run_tile(struct xe_device *xe, struct xe_tile *tile, struct kunit *test) +{ + struct xe_bo *bo, *external; + unsigned int bo_flags = XE_BO_CREATE_VRAM_IF_DGFX(tile); + struct xe_vm *vm = xe_migrate_get_vm(xe_device_get_root_tile(xe)->migrate); + struct xe_gt *__gt; + int err, i, id; + + kunit_info(test, "Testing device %s vram id %u\n", + dev_name(xe->drm.dev), tile->id); + + for (i = 0; i < 2; ++i) { + xe_vm_lock(vm, false); + bo = xe_bo_create_user(xe, NULL, vm, 0x10000, + DRM_XE_GEM_CPU_CACHING_WC, + ttm_bo_type_device, + bo_flags); + xe_vm_unlock(vm); + if (IS_ERR(bo)) { + KUNIT_FAIL(test, "bo create err=%pe\n", bo); + break; + } + + external = xe_bo_create_user(xe, NULL, NULL, 0x10000, + DRM_XE_GEM_CPU_CACHING_WC, + ttm_bo_type_device, bo_flags); + if (IS_ERR(external)) { + KUNIT_FAIL(test, "external bo create err=%pe\n", external); + goto cleanup_bo; + } + + xe_bo_lock(external, false); + err = xe_bo_pin_external(external); + xe_bo_unlock(external); + if (err) { + KUNIT_FAIL(test, "external bo pin err=%pe\n", + ERR_PTR(err)); + goto cleanup_external; + } + + err = xe_bo_evict_all(xe); + if (err) { + KUNIT_FAIL(test, "evict err=%pe\n", ERR_PTR(err)); + goto cleanup_all; + } + + for_each_gt(__gt, xe, id) + xe_gt_sanitize(__gt); + err = xe_bo_restore_kernel(xe); + /* + * Snapshotting the CTB and 
copying back a potentially old + * version seems risky, depending on what might have been + * inflight. Also it seems snapshotting the ADS object and + * copying back results in serious breakage. Normally when + * calling xe_bo_restore_kernel() we always fully restart the + * GT, which re-intializes such things. We could potentially + * skip saving and restoring such objects in xe_bo_evict_all() + * however seems quite fragile not to also restart the GT. Try + * to do that here by triggering a GT reset. + */ + for_each_gt(__gt, xe, id) { + xe_gt_reset_async(__gt); + flush_work(&__gt->reset.worker); + } + if (err) { + KUNIT_FAIL(test, "restore kernel err=%pe\n", + ERR_PTR(err)); + goto cleanup_all; + } + + err = xe_bo_restore_user(xe); + if (err) { + KUNIT_FAIL(test, "restore user err=%pe\n", ERR_PTR(err)); + goto cleanup_all; + } + + if (!xe_bo_is_vram(external)) { + KUNIT_FAIL(test, "external bo is not vram\n"); + err = -EPROTO; + goto cleanup_all; + } + + if (xe_bo_is_vram(bo)) { + KUNIT_FAIL(test, "bo is vram\n"); + err = -EPROTO; + goto cleanup_all; + } + + if (i) { + down_read(&vm->lock); + xe_vm_lock(vm, false); + err = xe_bo_validate(bo, bo->vm, false); + xe_vm_unlock(vm); + up_read(&vm->lock); + if (err) { + KUNIT_FAIL(test, "bo valid err=%pe\n", + ERR_PTR(err)); + goto cleanup_all; + } + xe_bo_lock(external, false); + err = xe_bo_validate(external, NULL, false); + xe_bo_unlock(external); + if (err) { + KUNIT_FAIL(test, "external bo valid err=%pe\n", + ERR_PTR(err)); + goto cleanup_all; + } + } + + xe_bo_lock(external, false); + xe_bo_unpin_external(external); + xe_bo_unlock(external); + + xe_bo_put(external); + + xe_bo_lock(bo, false); + __xe_bo_unset_bulk_move(bo); + xe_bo_unlock(bo); + xe_bo_put(bo); + continue; + +cleanup_all: + xe_bo_lock(external, false); + xe_bo_unpin_external(external); + xe_bo_unlock(external); +cleanup_external: + xe_bo_put(external); +cleanup_bo: + xe_bo_lock(bo, false); + __xe_bo_unset_bulk_move(bo); + xe_bo_unlock(bo); + xe_bo_put(bo); + break; + } + + xe_vm_put(vm); + + return 0; +} + +static int evict_test_run_device(struct xe_device *xe) +{ + struct kunit *test = xe_cur_kunit(); + struct xe_tile *tile; + int id; + + if (!IS_DGFX(xe)) { + kunit_info(test, "Skipping non-discrete device %s.\n", + dev_name(xe->drm.dev)); + return 0; + } + + xe_device_mem_access_get(xe); + + for_each_tile(tile, xe, id) + evict_test_run_tile(xe, tile, test); + + xe_device_mem_access_put(xe); + + return 0; +} + +void xe_bo_evict_kunit(struct kunit *test) +{ + xe_call_for_each_device(evict_test_run_device); +} +EXPORT_SYMBOL_IF_KUNIT(xe_bo_evict_kunit); diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.c b/drivers/gpu/drm/xe/tests/xe_bo_test.c new file mode 100644 index 0000000000..f408f17f21 --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_bo_test.c @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2022 Intel Corporation + */ + +#include "xe_bo_test.h" + +#include <kunit/test.h> + +static struct kunit_case xe_bo_tests[] = { + KUNIT_CASE(xe_ccs_migrate_kunit), + KUNIT_CASE(xe_bo_evict_kunit), + {} +}; + +static struct kunit_suite xe_bo_test_suite = { + .name = "xe_bo", + .test_cases = xe_bo_tests, +}; + +kunit_test_suite(xe_bo_test_suite); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("xe_bo kunit test"); +MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); diff --git a/drivers/gpu/drm/xe/tests/xe_bo_test.h b/drivers/gpu/drm/xe/tests/xe_bo_test.h new file mode 100644 index 0000000000..0113ab4506 --- /dev/null +++ 
b/drivers/gpu/drm/xe/tests/xe_bo_test.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 AND MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _XE_BO_TEST_H_ +#define _XE_BO_TEST_H_ + +struct kunit; + +void xe_ccs_migrate_kunit(struct kunit *test); +void xe_bo_evict_kunit(struct kunit *test); + +#endif diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf.c b/drivers/gpu/drm/xe/tests/xe_dma_buf.c new file mode 100644 index 0000000000..9f6d571d7f --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_dma_buf.c @@ -0,0 +1,278 @@ +// SPDX-License-Identifier: GPL-2.0 AND MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include <drm/xe_drm.h> + +#include <kunit/test.h> +#include <kunit/visibility.h> + +#include "tests/xe_dma_buf_test.h" +#include "tests/xe_pci_test.h" + +#include "xe_pci.h" + +static bool p2p_enabled(struct dma_buf_test_params *params) +{ + return IS_ENABLED(CONFIG_PCI_P2PDMA) && params->attach_ops && + params->attach_ops->allow_peer2peer; +} + +static bool is_dynamic(struct dma_buf_test_params *params) +{ + return IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY) && params->attach_ops && + params->attach_ops->move_notify; +} + +static void check_residency(struct kunit *test, struct xe_bo *exported, + struct xe_bo *imported, struct dma_buf *dmabuf) +{ + struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv); + u32 mem_type; + int ret; + + xe_bo_assert_held(exported); + xe_bo_assert_held(imported); + + mem_type = XE_PL_VRAM0; + if (!(params->mem_mask & XE_BO_CREATE_VRAM0_BIT)) + /* No VRAM allowed */ + mem_type = XE_PL_TT; + else if (params->force_different_devices && !p2p_enabled(params)) + /* No P2P */ + mem_type = XE_PL_TT; + else if (params->force_different_devices && !is_dynamic(params) && + (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) + /* Pin migrated to TT */ + mem_type = XE_PL_TT; + + if (!xe_bo_is_mem_type(exported, mem_type)) { + KUNIT_FAIL(test, "Exported bo was not in expected memory type.\n"); + return; + } + + if (xe_bo_is_pinned(exported)) + return; + + /* + * Evict exporter. Note that the gem object dma_buf member isn't + * set from xe_gem_prime_export(), and it's needed for the move_notify() + * functionality, so hack that up here. Evicting the exported bo will + * evict also the imported bo through the move_notify() functionality if + * importer is on a different device. If they're on the same device, + * the exporter and the importer should be the same bo. + */ + swap(exported->ttm.base.dma_buf, dmabuf); + ret = xe_bo_evict(exported, true); + swap(exported->ttm.base.dma_buf, dmabuf); + if (ret) { + if (ret != -EINTR && ret != -ERESTARTSYS) + KUNIT_FAIL(test, "Evicting exporter failed with err=%d.\n", + ret); + return; + } + + /* Verify that also importer has been evicted to SYSTEM */ + if (exported != imported && !xe_bo_is_mem_type(imported, XE_PL_SYSTEM)) { + KUNIT_FAIL(test, "Importer wasn't properly evicted.\n"); + return; + } + + /* Re-validate the importer. This should move also exporter in. */ + ret = xe_bo_validate(imported, NULL, false); + if (ret) { + if (ret != -EINTR && ret != -ERESTARTSYS) + KUNIT_FAIL(test, "Validating importer failed with err=%d.\n", + ret); + return; + } + + /* + * If on different devices, the exporter is kept in system if + * possible, saving a migration step as the transfer is just + * likely as fast from system memory. 
+ */ + if (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT) + KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, XE_PL_TT)); + else + KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(exported, mem_type)); + + if (params->force_different_devices) + KUNIT_EXPECT_TRUE(test, xe_bo_is_mem_type(imported, XE_PL_TT)); + else + KUNIT_EXPECT_TRUE(test, exported == imported); +} + +static void xe_test_dmabuf_import_same_driver(struct xe_device *xe) +{ + struct kunit *test = xe_cur_kunit(); + struct dma_buf_test_params *params = to_dma_buf_test_params(test->priv); + struct drm_gem_object *import; + struct dma_buf *dmabuf; + struct xe_bo *bo; + size_t size; + + /* No VRAM on this device? */ + if (!ttm_manager_type(&xe->ttm, XE_PL_VRAM0) && + (params->mem_mask & XE_BO_CREATE_VRAM0_BIT)) + return; + + size = PAGE_SIZE; + if ((params->mem_mask & XE_BO_CREATE_VRAM0_BIT) && + xe->info.vram_flags & XE_VRAM_FLAGS_NEED64K) + size = SZ_64K; + + kunit_info(test, "running %s\n", __func__); + bo = xe_bo_create_user(xe, NULL, NULL, size, DRM_XE_GEM_CPU_CACHING_WC, + ttm_bo_type_device, XE_BO_CREATE_USER_BIT | params->mem_mask); + if (IS_ERR(bo)) { + KUNIT_FAIL(test, "xe_bo_create() failed with err=%ld\n", + PTR_ERR(bo)); + return; + } + + dmabuf = xe_gem_prime_export(&bo->ttm.base, 0); + if (IS_ERR(dmabuf)) { + KUNIT_FAIL(test, "xe_gem_prime_export() failed with err=%ld\n", + PTR_ERR(dmabuf)); + goto out; + } + + import = xe_gem_prime_import(&xe->drm, dmabuf); + if (!IS_ERR(import)) { + struct xe_bo *import_bo = gem_to_xe_bo(import); + + /* + * Did import succeed when it shouldn't due to lack of p2p support? + */ + if (params->force_different_devices && + !p2p_enabled(params) && + !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) { + KUNIT_FAIL(test, + "xe_gem_prime_import() succeeded when it shouldn't have\n"); + } else { + int err; + + /* Is everything where we expect it to be? */ + xe_bo_lock(import_bo, false); + err = xe_bo_validate(import_bo, NULL, false); + + /* Pinning in VRAM is not allowed. */ + if (!is_dynamic(params) && + params->force_different_devices && + !(params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) + KUNIT_EXPECT_EQ(test, err, -EINVAL); + /* Otherwise only expect interrupts or success. */ + else if (err && err != -EINTR && err != -ERESTARTSYS) + KUNIT_EXPECT_TRUE(test, !err || err == -EINTR || + err == -ERESTARTSYS); + + if (!err) + check_residency(test, bo, import_bo, dmabuf); + xe_bo_unlock(import_bo); + } + drm_gem_object_put(import); + } else if (PTR_ERR(import) != -EOPNOTSUPP) { + /* Unexpected error code. */ + KUNIT_FAIL(test, + "xe_gem_prime_import failed with the wrong err=%ld\n", + PTR_ERR(import)); + } else if (!params->force_different_devices || + p2p_enabled(params) || + (params->mem_mask & XE_BO_CREATE_SYSTEM_BIT)) { + /* Shouldn't fail if we can reuse same bo, use p2p or use system */ + KUNIT_FAIL(test, "dynamic p2p attachment failed with err=%ld\n", + PTR_ERR(import)); + } + dma_buf_put(dmabuf); +out: + drm_gem_object_put(&bo->ttm.base); +} + +static const struct dma_buf_attach_ops nop2p_attach_ops = { + .allow_peer2peer = false, + .move_notify = xe_dma_buf_move_notify +}; + +/* + * We test the implementation with bos of different residency and with + * importers with different capabilities; some lacking p2p support and some + * lacking dynamic capabilities (attach_ops == NULL). We also fake + * different devices avoiding the import shortcut that just reuses the same + * gem object. 
+ */ +static const struct dma_buf_test_params test_params[] = { + {.mem_mask = XE_BO_CREATE_VRAM0_BIT, + .attach_ops = &xe_dma_buf_attach_ops}, + {.mem_mask = XE_BO_CREATE_VRAM0_BIT, + .attach_ops = &xe_dma_buf_attach_ops, + .force_different_devices = true}, + + {.mem_mask = XE_BO_CREATE_VRAM0_BIT, + .attach_ops = &nop2p_attach_ops}, + {.mem_mask = XE_BO_CREATE_VRAM0_BIT, + .attach_ops = &nop2p_attach_ops, + .force_different_devices = true}, + + {.mem_mask = XE_BO_CREATE_VRAM0_BIT}, + {.mem_mask = XE_BO_CREATE_VRAM0_BIT, + .force_different_devices = true}, + + {.mem_mask = XE_BO_CREATE_SYSTEM_BIT, + .attach_ops = &xe_dma_buf_attach_ops}, + {.mem_mask = XE_BO_CREATE_SYSTEM_BIT, + .attach_ops = &xe_dma_buf_attach_ops, + .force_different_devices = true}, + + {.mem_mask = XE_BO_CREATE_SYSTEM_BIT, + .attach_ops = &nop2p_attach_ops}, + {.mem_mask = XE_BO_CREATE_SYSTEM_BIT, + .attach_ops = &nop2p_attach_ops, + .force_different_devices = true}, + + {.mem_mask = XE_BO_CREATE_SYSTEM_BIT}, + {.mem_mask = XE_BO_CREATE_SYSTEM_BIT, + .force_different_devices = true}, + + {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT, + .attach_ops = &xe_dma_buf_attach_ops}, + {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT, + .attach_ops = &xe_dma_buf_attach_ops, + .force_different_devices = true}, + + {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT, + .attach_ops = &nop2p_attach_ops}, + {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT, + .attach_ops = &nop2p_attach_ops, + .force_different_devices = true}, + + {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT}, + {.mem_mask = XE_BO_CREATE_SYSTEM_BIT | XE_BO_CREATE_VRAM0_BIT, + .force_different_devices = true}, + + {} +}; + +static int dma_buf_run_device(struct xe_device *xe) +{ + const struct dma_buf_test_params *params; + struct kunit *test = xe_cur_kunit(); + + for (params = test_params; params->mem_mask; ++params) { + struct dma_buf_test_params p = *params; + + p.base.id = XE_TEST_LIVE_DMA_BUF; + test->priv = &p; + xe_test_dmabuf_import_same_driver(xe); + } + + /* A non-zero return would halt iteration over driver devices */ + return 0; +} + +void xe_dma_buf_kunit(struct kunit *test) +{ + xe_call_for_each_device(dma_buf_run_device); +} +EXPORT_SYMBOL_IF_KUNIT(xe_dma_buf_kunit); diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c new file mode 100644 index 0000000000..9f5a9cda8c --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2022 Intel Corporation + */ + +#include "xe_dma_buf_test.h" + +#include <kunit/test.h> + +static struct kunit_case xe_dma_buf_tests[] = { + KUNIT_CASE(xe_dma_buf_kunit), + {} +}; + +static struct kunit_suite xe_dma_buf_test_suite = { + .name = "xe_dma_buf", + .test_cases = xe_dma_buf_tests, +}; + +kunit_test_suite(xe_dma_buf_test_suite); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("xe_dma_buf kunit test"); +MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); diff --git a/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h new file mode 100644 index 0000000000..e6b464ddd5 --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_dma_buf_test.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 AND MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _XE_DMA_BUF_TEST_H_ +#define _XE_DMA_BUF_TEST_H_ + +struct kunit; + +void xe_dma_buf_kunit(struct kunit *test); + +#endif 
diff --git a/drivers/gpu/drm/xe/tests/xe_lmtt_test.c b/drivers/gpu/drm/xe/tests/xe_lmtt_test.c new file mode 100644 index 0000000000..1f1557c45a --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_lmtt_test.c @@ -0,0 +1,73 @@ +// SPDX-License-Identifier: GPL-2.0 AND MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include <kunit/test.h> + +static const struct lmtt_ops_param { + const char *desc; + const struct xe_lmtt_ops *ops; +} lmtt_ops_params[] = { + { "2-level", &lmtt_2l_ops, }, + { "multi-level", &lmtt_ml_ops, }, +}; + +static void lmtt_ops_param_get_desc(const struct lmtt_ops_param *p, char *desc) +{ + snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s", p->desc); +} + +KUNIT_ARRAY_PARAM(lmtt_ops, lmtt_ops_params, lmtt_ops_param_get_desc); + +static void test_ops(struct kunit *test) +{ + const struct lmtt_ops_param *p = test->param_value; + const struct xe_lmtt_ops *ops = p->ops; + unsigned int n; + + KUNIT_ASSERT_NOT_NULL(test, ops->lmtt_root_pd_level); + KUNIT_ASSERT_NOT_NULL(test, ops->lmtt_pte_num); + KUNIT_ASSERT_NOT_NULL(test, ops->lmtt_pte_size); + KUNIT_ASSERT_NOT_NULL(test, ops->lmtt_pte_shift); + KUNIT_ASSERT_NOT_NULL(test, ops->lmtt_pte_index); + KUNIT_ASSERT_NOT_NULL(test, ops->lmtt_pte_encode); + + KUNIT_EXPECT_NE(test, ops->lmtt_root_pd_level(), 0); + + for (n = 0; n <= ops->lmtt_root_pd_level(); n++) { + KUNIT_EXPECT_NE_MSG(test, ops->lmtt_pte_num(n), 0, + "level=%u", n); + KUNIT_EXPECT_NE_MSG(test, ops->lmtt_pte_size(n), 0, + "level=%u", n); + KUNIT_EXPECT_NE_MSG(test, ops->lmtt_pte_encode(0, n), LMTT_PTE_INVALID, + "level=%u", n); + } + + for (n = 0; n < ops->lmtt_root_pd_level(); n++) { + u64 addr = BIT_ULL(ops->lmtt_pte_shift(n)); + + KUNIT_EXPECT_NE_MSG(test, ops->lmtt_pte_shift(n), 0, + "level=%u", n); + KUNIT_EXPECT_EQ_MSG(test, ops->lmtt_pte_index(addr - 1, n), 0, + "addr=%#llx level=%u", addr, n); + KUNIT_EXPECT_EQ_MSG(test, ops->lmtt_pte_index(addr + 1, n), 1, + "addr=%#llx level=%u", addr, n); + KUNIT_EXPECT_EQ_MSG(test, ops->lmtt_pte_index(addr * 2 - 1, n), 1, + "addr=%#llx level=%u", addr, n); + KUNIT_EXPECT_EQ_MSG(test, ops->lmtt_pte_index(addr * 2, n), 2, + "addr=%#llx level=%u", addr, n); + } +} + +static struct kunit_case lmtt_test_cases[] = { + KUNIT_CASE_PARAM(test_ops, lmtt_ops_gen_params), + {} +}; + +static struct kunit_suite lmtt_suite = { + .name = "lmtt", + .test_cases = lmtt_test_cases, +}; + +kunit_test_suites(&lmtt_suite); diff --git a/drivers/gpu/drm/xe/tests/xe_migrate.c b/drivers/gpu/drm/xe/tests/xe_migrate.c new file mode 100644 index 0000000000..c347e2c29f --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_migrate.c @@ -0,0 +1,444 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2020-2022 Intel Corporation + */ + +#include <kunit/test.h> +#include <kunit/visibility.h> + +#include "tests/xe_migrate_test.h" +#include "tests/xe_pci_test.h" + +#include "xe_pci.h" + +static bool sanity_fence_failed(struct xe_device *xe, struct dma_fence *fence, + const char *str, struct kunit *test) +{ + long ret; + + if (IS_ERR(fence)) { + KUNIT_FAIL(test, "Failed to create fence for %s: %li\n", str, + PTR_ERR(fence)); + return true; + } + if (!fence) + return true; + + ret = dma_fence_wait_timeout(fence, false, 5 * HZ); + if (ret <= 0) { + KUNIT_FAIL(test, "Fence timed out for %s: %li\n", str, ret); + return true; + } + + return false; +} + +static int run_sanity_job(struct xe_migrate *m, struct xe_device *xe, + struct xe_bb *bb, u32 second_idx, const char *str, + struct kunit *test) +{ + u64 batch_base = xe_migrate_batch_base(m, xe->info.has_usm); + struct 
xe_sched_job *job = xe_bb_create_migration_job(m->q, bb, + batch_base, + second_idx); + struct dma_fence *fence; + + if (IS_ERR(job)) { + KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n", + PTR_ERR(job)); + return PTR_ERR(job); + } + + xe_sched_job_arm(job); + fence = dma_fence_get(&job->drm.s_fence->finished); + xe_sched_job_push(job); + + if (sanity_fence_failed(xe, fence, str, test)) + return -ETIMEDOUT; + + dma_fence_put(fence); + kunit_info(test, "%s: Job completed\n", str); + return 0; +} + +static void +sanity_populate_cb(struct xe_migrate_pt_update *pt_update, + struct xe_tile *tile, struct iosys_map *map, void *dst, + u32 qword_ofs, u32 num_qwords, + const struct xe_vm_pgtable_update *update) +{ + struct migrate_test_params *p = + to_migrate_test_params(xe_cur_kunit_priv(XE_TEST_LIVE_MIGRATE)); + int i; + u64 *ptr = dst; + u64 value; + + for (i = 0; i < num_qwords; i++) { + value = (qword_ofs + i - update->ofs) * 0x1111111111111111ULL; + if (map) + xe_map_wr(tile_to_xe(tile), map, (qword_ofs + i) * + sizeof(u64), u64, value); + else + ptr[i] = value; + } + + kunit_info(xe_cur_kunit(), "Used %s.\n", map ? "CPU" : "GPU"); + if (p->force_gpu && map) + KUNIT_FAIL(xe_cur_kunit(), "GPU pagetable update used CPU.\n"); +} + +static const struct xe_migrate_pt_update_ops sanity_ops = { + .populate = sanity_populate_cb, +}; + +#define check(_retval, _expected, str, _test) \ + do { if ((_retval) != (_expected)) { \ + KUNIT_FAIL(_test, "Sanity check failed: " str \ + " expected %llx, got %llx\n", \ + (u64)(_expected), (u64)(_retval)); \ + } } while (0) + +static void test_copy(struct xe_migrate *m, struct xe_bo *bo, + struct kunit *test, u32 region) +{ + struct xe_device *xe = tile_to_xe(m->tile); + u64 retval, expected = 0; + bool big = bo->size >= SZ_2M; + struct dma_fence *fence; + const char *str = big ? "Copying big bo" : "Copying small bo"; + int err; + + struct xe_bo *remote = xe_bo_create_locked(xe, m->tile, NULL, + bo->size, + ttm_bo_type_kernel, + region | + XE_BO_NEEDS_CPU_ACCESS); + if (IS_ERR(remote)) { + KUNIT_FAIL(test, "Failed to allocate remote bo for %s: %pe\n", + str, remote); + return; + } + + err = xe_bo_validate(remote, NULL, false); + if (err) { + KUNIT_FAIL(test, "Failed to validate system bo for %s: %i\n", + str, err); + goto out_unlock; + } + + err = xe_bo_vmap(remote); + if (err) { + KUNIT_FAIL(test, "Failed to vmap system bo for %s: %i\n", + str, err); + goto out_unlock; + } + + xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size); + fence = xe_migrate_clear(m, remote, remote->ttm.resource); + if (!sanity_fence_failed(xe, fence, big ? "Clearing remote big bo" : + "Clearing remote small bo", test)) { + retval = xe_map_rd(xe, &remote->vmap, 0, u64); + check(retval, expected, "remote first offset should be cleared", + test); + retval = xe_map_rd(xe, &remote->vmap, remote->size - 8, u64); + check(retval, expected, "remote last offset should be cleared", + test); + } + dma_fence_put(fence); + + /* Try to copy 0xc0 from remote to vram with 2MB or 64KiB/4KiB pages */ + xe_map_memset(xe, &remote->vmap, 0, 0xc0, remote->size); + xe_map_memset(xe, &bo->vmap, 0, 0xd0, bo->size); + + expected = 0xc0c0c0c0c0c0c0c0; + fence = xe_migrate_copy(m, remote, bo, remote->ttm.resource, + bo->ttm.resource, false); + if (!sanity_fence_failed(xe, fence, big ? 
"Copying big bo remote -> vram" : + "Copying small bo remote -> vram", test)) { + retval = xe_map_rd(xe, &bo->vmap, 0, u64); + check(retval, expected, + "remote -> vram bo first offset should be copied", test); + retval = xe_map_rd(xe, &bo->vmap, bo->size - 8, u64); + check(retval, expected, + "remote -> vram bo offset should be copied", test); + } + dma_fence_put(fence); + + /* And other way around.. slightly hacky.. */ + xe_map_memset(xe, &remote->vmap, 0, 0xd0, remote->size); + xe_map_memset(xe, &bo->vmap, 0, 0xc0, bo->size); + + fence = xe_migrate_copy(m, bo, remote, bo->ttm.resource, + remote->ttm.resource, false); + if (!sanity_fence_failed(xe, fence, big ? "Copying big bo vram -> remote" : + "Copying small bo vram -> remote", test)) { + retval = xe_map_rd(xe, &remote->vmap, 0, u64); + check(retval, expected, + "vram -> remote bo first offset should be copied", test); + retval = xe_map_rd(xe, &remote->vmap, bo->size - 8, u64); + check(retval, expected, + "vram -> remote bo last offset should be copied", test); + } + dma_fence_put(fence); + + xe_bo_vunmap(remote); +out_unlock: + xe_bo_unlock(remote); + xe_bo_put(remote); +} + +static void test_copy_sysmem(struct xe_migrate *m, struct xe_bo *bo, + struct kunit *test) +{ + test_copy(m, bo, test, XE_BO_CREATE_SYSTEM_BIT); +} + +static void test_copy_vram(struct xe_migrate *m, struct xe_bo *bo, + struct kunit *test) +{ + u32 region; + + if (bo->ttm.resource->mem_type == XE_PL_SYSTEM) + return; + + if (bo->ttm.resource->mem_type == XE_PL_VRAM0) + region = XE_BO_CREATE_VRAM1_BIT; + else + region = XE_BO_CREATE_VRAM0_BIT; + test_copy(m, bo, test, region); +} + +static void test_pt_update(struct xe_migrate *m, struct xe_bo *pt, + struct kunit *test, bool force_gpu) +{ + struct xe_device *xe = tile_to_xe(m->tile); + struct dma_fence *fence; + u64 retval, expected; + ktime_t then, now; + int i; + + struct xe_vm_pgtable_update update = { + .ofs = 1, + .qwords = 0x10, + .pt_bo = pt, + }; + struct xe_migrate_pt_update pt_update = { + .ops = &sanity_ops, + }; + struct migrate_test_params p = { + .base.id = XE_TEST_LIVE_MIGRATE, + .force_gpu = force_gpu, + }; + + test->priv = &p; + /* Test xe_migrate_update_pgtables() updates the pagetable as expected */ + expected = 0xf0f0f0f0f0f0f0f0ULL; + xe_map_memset(xe, &pt->vmap, 0, (u8)expected, pt->size); + + then = ktime_get(); + fence = xe_migrate_update_pgtables(m, m->q->vm, NULL, m->q, &update, 1, + NULL, 0, &pt_update); + now = ktime_get(); + if (sanity_fence_failed(xe, fence, "Migration pagetable update", test)) + return; + + kunit_info(test, "Updating without syncing took %llu us,\n", + (unsigned long long)ktime_to_us(ktime_sub(now, then))); + + dma_fence_put(fence); + retval = xe_map_rd(xe, &pt->vmap, 0, u64); + check(retval, expected, "PTE[0] must stay untouched", test); + + for (i = 0; i < update.qwords; i++) { + retval = xe_map_rd(xe, &pt->vmap, (update.ofs + i) * 8, u64); + check(retval, i * 0x1111111111111111ULL, "PTE update", test); + } + + retval = xe_map_rd(xe, &pt->vmap, 8 * (update.ofs + update.qwords), + u64); + check(retval, expected, "PTE[0x11] must stay untouched", test); +} + +static void xe_migrate_sanity_test(struct xe_migrate *m, struct kunit *test) +{ + struct xe_tile *tile = m->tile; + struct xe_device *xe = tile_to_xe(tile); + struct xe_bo *pt, *bo = m->pt_bo, *big, *tiny; + struct xe_res_cursor src_it; + struct dma_fence *fence; + u64 retval, expected; + struct xe_bb *bb; + int err; + u8 id = tile->id; + + err = xe_bo_vmap(bo); + if (err) { + KUNIT_FAIL(test, "Failed to vmap 
our pagetables: %li\n", + PTR_ERR(bo)); + return; + } + + big = xe_bo_create_pin_map(xe, tile, m->q->vm, SZ_4M, + ttm_bo_type_kernel, + XE_BO_CREATE_VRAM_IF_DGFX(tile) | + XE_BO_CREATE_PINNED_BIT); + if (IS_ERR(big)) { + KUNIT_FAIL(test, "Failed to allocate bo: %li\n", PTR_ERR(big)); + goto vunmap; + } + + pt = xe_bo_create_pin_map(xe, tile, m->q->vm, XE_PAGE_SIZE, + ttm_bo_type_kernel, + XE_BO_CREATE_VRAM_IF_DGFX(tile) | + XE_BO_CREATE_PINNED_BIT); + if (IS_ERR(pt)) { + KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n", + PTR_ERR(pt)); + goto free_big; + } + + tiny = xe_bo_create_pin_map(xe, tile, m->q->vm, + 2 * SZ_4K, + ttm_bo_type_kernel, + XE_BO_CREATE_VRAM_IF_DGFX(tile) | + XE_BO_CREATE_PINNED_BIT); + if (IS_ERR(tiny)) { + KUNIT_FAIL(test, "Failed to allocate fake pt: %li\n", + PTR_ERR(pt)); + goto free_pt; + } + + bb = xe_bb_new(tile->primary_gt, 32, xe->info.has_usm); + if (IS_ERR(bb)) { + KUNIT_FAIL(test, "Failed to create batchbuffer: %li\n", + PTR_ERR(bb)); + goto free_tiny; + } + + kunit_info(test, "Starting tests, top level PT addr: %lx, special pagetable base addr: %lx\n", + (unsigned long)xe_bo_main_addr(m->q->vm->pt_root[id]->bo, XE_PAGE_SIZE), + (unsigned long)xe_bo_main_addr(m->pt_bo, XE_PAGE_SIZE)); + + /* First part of the test, are we updating our pagetable bo with a new entry? */ + xe_map_wr(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), u64, + 0xdeaddeadbeefbeef); + expected = m->q->vm->pt_ops->pte_encode_bo(pt, 0, xe->pat.idx[XE_CACHE_WB], 0); + if (m->q->vm->flags & XE_VM_FLAG_64K) + expected |= XE_PTE_PS64; + if (xe_bo_is_vram(pt)) + xe_res_first(pt->ttm.resource, 0, pt->size, &src_it); + else + xe_res_first_sg(xe_bo_sg(pt), 0, pt->size, &src_it); + + emit_pte(m, bb, NUM_KERNEL_PDE - 1, xe_bo_is_vram(pt), false, + &src_it, XE_PAGE_SIZE, pt->ttm.resource); + + run_sanity_job(m, xe, bb, bb->len, "Writing PTE for our fake PT", test); + + retval = xe_map_rd(xe, &bo->vmap, XE_PAGE_SIZE * (NUM_KERNEL_PDE - 1), + u64); + check(retval, expected, "PTE entry write", test); + + /* Now try to write data to our newly mapped 'pagetable', see if it succeeds */ + bb->len = 0; + bb->cs[bb->len++] = MI_BATCH_BUFFER_END; + xe_map_wr(xe, &pt->vmap, 0, u32, 0xdeaddead); + expected = 0; + + emit_clear(tile->primary_gt, bb, xe_migrate_vm_addr(NUM_KERNEL_PDE - 1, 0), 4, 4, + IS_DGFX(xe)); + run_sanity_job(m, xe, bb, 1, "Writing to our newly mapped pagetable", + test); + + retval = xe_map_rd(xe, &pt->vmap, 0, u32); + check(retval, expected, "Write to PT after adding PTE", test); + + /* Sanity checks passed, try the full ones! 
*/ + + /* Clear a small bo */ + kunit_info(test, "Clearing small buffer object\n"); + xe_map_memset(xe, &tiny->vmap, 0, 0x22, tiny->size); + expected = 0; + fence = xe_migrate_clear(m, tiny, tiny->ttm.resource); + if (sanity_fence_failed(xe, fence, "Clearing small bo", test)) + goto out; + + dma_fence_put(fence); + retval = xe_map_rd(xe, &tiny->vmap, 0, u32); + check(retval, expected, "Command clear small first value", test); + retval = xe_map_rd(xe, &tiny->vmap, tiny->size - 4, u32); + check(retval, expected, "Command clear small last value", test); + + kunit_info(test, "Copying small buffer object to system\n"); + test_copy_sysmem(m, tiny, test); + if (xe->info.tile_count > 1) { + kunit_info(test, "Copying small buffer object to other vram\n"); + test_copy_vram(m, tiny, test); + } + + /* Clear a big bo */ + kunit_info(test, "Clearing big buffer object\n"); + xe_map_memset(xe, &big->vmap, 0, 0x11, big->size); + expected = 0; + fence = xe_migrate_clear(m, big, big->ttm.resource); + if (sanity_fence_failed(xe, fence, "Clearing big bo", test)) + goto out; + + dma_fence_put(fence); + retval = xe_map_rd(xe, &big->vmap, 0, u32); + check(retval, expected, "Command clear big first value", test); + retval = xe_map_rd(xe, &big->vmap, big->size - 4, u32); + check(retval, expected, "Command clear big last value", test); + + kunit_info(test, "Copying big buffer object to system\n"); + test_copy_sysmem(m, big, test); + if (xe->info.tile_count > 1) { + kunit_info(test, "Copying big buffer object to other vram\n"); + test_copy_vram(m, big, test); + } + + kunit_info(test, "Testing page table update using CPU if GPU idle.\n"); + test_pt_update(m, pt, test, false); + kunit_info(test, "Testing page table update using GPU\n"); + test_pt_update(m, pt, test, true); + +out: + xe_bb_free(bb, NULL); +free_tiny: + xe_bo_unpin(tiny); + xe_bo_put(tiny); +free_pt: + xe_bo_unpin(pt); + xe_bo_put(pt); +free_big: + xe_bo_unpin(big); + xe_bo_put(big); +vunmap: + xe_bo_vunmap(m->pt_bo); +} + +static int migrate_test_run_device(struct xe_device *xe) +{ + struct kunit *test = xe_cur_kunit(); + struct xe_tile *tile; + int id; + + for_each_tile(tile, xe, id) { + struct xe_migrate *m = tile->migrate; + + kunit_info(test, "Testing tile id %d.\n", id); + xe_vm_lock(m->q->vm, true); + xe_device_mem_access_get(xe); + xe_migrate_sanity_test(m, test); + xe_device_mem_access_put(xe); + xe_vm_unlock(m->q->vm); + } + + return 0; +} + +void xe_migrate_sanity_kunit(struct kunit *test) +{ + xe_call_for_each_device(migrate_test_run_device); +} +EXPORT_SYMBOL_IF_KUNIT(xe_migrate_sanity_kunit); diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.c b/drivers/gpu/drm/xe/tests/xe_migrate_test.c new file mode 100644 index 0000000000..cf0c173b94 --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_migrate_test.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2022 Intel Corporation + */ + +#include "xe_migrate_test.h" + +#include <kunit/test.h> + +static struct kunit_case xe_migrate_tests[] = { + KUNIT_CASE(xe_migrate_sanity_kunit), + {} +}; + +static struct kunit_suite xe_migrate_test_suite = { + .name = "xe_migrate", + .test_cases = xe_migrate_tests, +}; + +kunit_test_suite(xe_migrate_test_suite); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("xe_migrate kunit test"); +MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); diff --git a/drivers/gpu/drm/xe/tests/xe_migrate_test.h b/drivers/gpu/drm/xe/tests/xe_migrate_test.h new file mode 100644 index 0000000000..7c645c6682 --- /dev/null +++ 
b/drivers/gpu/drm/xe/tests/xe_migrate_test.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 AND MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _XE_MIGRATE_TEST_H_ +#define _XE_MIGRATE_TEST_H_ + +struct kunit; + +void xe_migrate_sanity_kunit(struct kunit *test); + +#endif diff --git a/drivers/gpu/drm/xe/tests/xe_mocs.c b/drivers/gpu/drm/xe/tests/xe_mocs.c new file mode 100644 index 0000000000..7dd34f94e8 --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_mocs.c @@ -0,0 +1,130 @@ +// SPDX-License-Identifier: GPL-2.0 AND MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include <kunit/test.h> +#include <kunit/visibility.h> + +#include "tests/xe_mocs_test.h" +#include "tests/xe_pci_test.h" +#include "tests/xe_test.h" + +#include "xe_pci.h" +#include "xe_gt.h" +#include "xe_mocs.h" +#include "xe_device.h" + +struct live_mocs { + struct xe_mocs_info table; +}; + +static int live_mocs_init(struct live_mocs *arg, struct xe_gt *gt) +{ + unsigned int flags; + struct kunit *test = xe_cur_kunit(); + + memset(arg, 0, sizeof(*arg)); + + flags = get_mocs_settings(gt_to_xe(gt), &arg->table); + + kunit_info(test, "table size %d", arg->table.size); + kunit_info(test, "table uc_index %d", arg->table.uc_index); + kunit_info(test, "table n_entries %d", arg->table.n_entries); + + return flags; +} + +static void read_l3cc_table(struct xe_gt *gt, + const struct xe_mocs_info *info) +{ + unsigned int i; + u32 l3cc; + u32 reg_val; + u32 ret; + + struct kunit *test = xe_cur_kunit(); + + xe_device_mem_access_get(gt_to_xe(gt)); + ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n"); + mocs_dbg(>_to_xe(gt)->drm, "L3CC entries:%d\n", info->n_entries); + for (i = 0; + i < (info->n_entries + 1) / 2 ? + (l3cc = l3cc_combine(get_entry_l3cc(info, 2 * i), + get_entry_l3cc(info, 2 * i + 1))), 1 : 0; + i++) { + if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250) + reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_LNCFCMOCS(i)); + else + reg_val = xe_mmio_read32(gt, XELP_LNCFCMOCS(i)); + mocs_dbg(>_to_xe(gt)->drm, "%d 0x%x 0x%x 0x%x\n", i, + XELP_LNCFCMOCS(i).addr, reg_val, l3cc); + if (reg_val != l3cc) + KUNIT_FAIL(test, "l3cc reg 0x%x has incorrect val.\n", + XELP_LNCFCMOCS(i).addr); + } + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_device_mem_access_put(gt_to_xe(gt)); +} + +static void read_mocs_table(struct xe_gt *gt, + const struct xe_mocs_info *info) +{ + struct xe_device *xe = gt_to_xe(gt); + + unsigned int i; + u32 mocs; + u32 reg_val; + u32 ret; + + struct kunit *test = xe_cur_kunit(); + + xe_device_mem_access_get(gt_to_xe(gt)); + ret = xe_force_wake_get(gt_to_fw(gt), XE_FW_GT); + KUNIT_ASSERT_EQ_MSG(test, ret, 0, "Forcewake Failed.\n"); + mocs_dbg(>_to_xe(gt)->drm, "Global MOCS entries:%d\n", info->n_entries); + drm_WARN_ONCE(&xe->drm, !info->unused_entries_index, + "Unused entries index should have been defined\n"); + for (i = 0; + i < info->n_entries ? 
(mocs = get_entry_control(info, i)), 1 : 0; + i++) { + if (GRAPHICS_VERx100(gt_to_xe(gt)) >= 1250) + reg_val = xe_gt_mcr_unicast_read_any(gt, XEHP_GLOBAL_MOCS(i)); + else + reg_val = xe_mmio_read32(gt, XELP_GLOBAL_MOCS(i)); + mocs_dbg(>_to_xe(gt)->drm, "%d 0x%x 0x%x 0x%x\n", i, + XELP_GLOBAL_MOCS(i).addr, reg_val, mocs); + if (reg_val != mocs) + KUNIT_FAIL(test, "mocs reg 0x%x has incorrect val.\n", + XELP_GLOBAL_MOCS(i).addr); + } + xe_force_wake_put(gt_to_fw(gt), XE_FW_GT); + xe_device_mem_access_put(gt_to_xe(gt)); +} + +static int mocs_kernel_test_run_device(struct xe_device *xe) +{ + /* Basic check the system is configured with the expected mocs table */ + + struct live_mocs mocs; + struct xe_gt *gt; + + unsigned int flags; + int id; + + for_each_gt(gt, xe, id) { + flags = live_mocs_init(&mocs, gt); + if (flags & HAS_GLOBAL_MOCS) + read_mocs_table(gt, &mocs.table); + if (flags & HAS_LNCF_MOCS) + read_l3cc_table(gt, &mocs.table); + } + return 0; +} + +void xe_live_mocs_kernel_kunit(struct kunit *test) +{ + xe_call_for_each_device(mocs_kernel_test_run_device); +} +EXPORT_SYMBOL_IF_KUNIT(xe_live_mocs_kernel_kunit); diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.c b/drivers/gpu/drm/xe/tests/xe_mocs_test.c new file mode 100644 index 0000000000..421b819fd4 --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_mocs_test.c @@ -0,0 +1,25 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2022 Intel Corporation + */ + +#include "xe_mocs_test.h" + +#include <kunit/test.h> + +static struct kunit_case xe_mocs_tests[] = { + KUNIT_CASE(xe_live_mocs_kernel_kunit), + {} +}; + +static struct kunit_suite xe_mocs_test_suite = { + .name = "xe_mocs", + .test_cases = xe_mocs_tests, +}; + +kunit_test_suite(xe_mocs_test_suite); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("xe_mocs kunit test"); +MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); diff --git a/drivers/gpu/drm/xe/tests/xe_mocs_test.h b/drivers/gpu/drm/xe/tests/xe_mocs_test.h new file mode 100644 index 0000000000..7faa3575e6 --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_mocs_test.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 AND MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _XE_MOCS_TEST_H_ +#define _XE_MOCS_TEST_H_ + +struct kunit; + +void xe_live_mocs_kernel_kunit(struct kunit *test); + +#endif diff --git a/drivers/gpu/drm/xe/tests/xe_pci.c b/drivers/gpu/drm/xe/tests/xe_pci.c new file mode 100644 index 0000000000..602793644f --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_pci.c @@ -0,0 +1,166 @@ +// SPDX-License-Identifier: GPL-2.0 AND MIT +/* + * Copyright © 2023 Intel Corporation + */ + +#include "tests/xe_pci_test.h" + +#include "tests/xe_test.h" + +#include <kunit/test-bug.h> +#include <kunit/test.h> +#include <kunit/test-bug.h> +#include <kunit/visibility.h> + +struct kunit_test_data { + int ndevs; + xe_device_fn xe_fn; +}; + +static int dev_to_xe_device_fn(struct device *dev, void *__data) + +{ + struct drm_device *drm = dev_get_drvdata(dev); + struct kunit_test_data *data = __data; + int ret = 0; + int idx; + + data->ndevs++; + + if (drm_dev_enter(drm, &idx)) + ret = data->xe_fn(to_xe_device(dev_get_drvdata(dev))); + drm_dev_exit(idx); + + return ret; +} + +/** + * xe_call_for_each_device - Iterate over all devices this driver binds to + * @xe_fn: Function to call for each device. + * + * This function iterated over all devices this driver binds to, and calls + * @xe_fn: for each one of them. 
If the called function returns anything else + * than 0, iteration is stopped and the return value is returned by this + * function. Across each function call, drm_dev_enter() / drm_dev_exit() is + * called for the corresponding drm device. + * + * Return: Number of devices iterated or + * the error code of a call to @xe_fn returning an error code. + */ +int xe_call_for_each_device(xe_device_fn xe_fn) +{ + int ret; + struct kunit_test_data data = { + .xe_fn = xe_fn, + .ndevs = 0, + }; + + ret = driver_for_each_device(&xe_pci_driver.driver, NULL, + &data, dev_to_xe_device_fn); + + if (!data.ndevs) + kunit_skip(current->kunit_test, "test runs only on hardware\n"); + + return ret ?: data.ndevs; +} + +/** + * xe_call_for_each_graphics_ip - Iterate over all recognized graphics IPs + * @xe_fn: Function to call for each device. + * + * This function iterates over the descriptors for all graphics IPs recognized + * by the driver and calls @xe_fn: for each one of them. + */ +void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn) +{ + const struct xe_graphics_desc *ip, *last = NULL; + + for (int i = 0; i < ARRAY_SIZE(graphics_ip_map); i++) { + ip = graphics_ip_map[i].ip; + if (ip == last) + continue; + + xe_fn(ip); + last = ip; + } +} +EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_graphics_ip); + +/** + * xe_call_for_each_media_ip - Iterate over all recognized media IPs + * @xe_fn: Function to call for each device. + * + * This function iterates over the descriptors for all media IPs recognized + * by the driver and calls @xe_fn: for each one of them. + */ +void xe_call_for_each_media_ip(xe_media_fn xe_fn) +{ + const struct xe_media_desc *ip, *last = NULL; + + for (int i = 0; i < ARRAY_SIZE(media_ip_map); i++) { + ip = media_ip_map[i].ip; + if (ip == last) + continue; + + xe_fn(ip); + last = ip; + } +} +EXPORT_SYMBOL_IF_KUNIT(xe_call_for_each_media_ip); + +static void fake_read_gmdid(struct xe_device *xe, enum xe_gmdid_type type, + u32 *ver, u32 *revid) +{ + struct kunit *test = kunit_get_current_test(); + struct xe_pci_fake_data *data = test->priv; + + if (type == GMDID_MEDIA) { + *ver = data->media_verx100; + *revid = xe_step_to_gmdid(data->media_step); + } else { + *ver = data->graphics_verx100; + *revid = xe_step_to_gmdid(data->graphics_step); + } +} + +int xe_pci_fake_device_init(struct xe_device *xe) +{ + struct kunit *test = kunit_get_current_test(); + struct xe_pci_fake_data *data = test->priv; + const struct pci_device_id *ent = pciidlist; + const struct xe_device_desc *desc; + const struct xe_subplatform_desc *subplatform_desc; + + if (!data) { + desc = (const void *)ent->driver_data; + subplatform_desc = NULL; + goto done; + } + + for (ent = pciidlist; ent->device; ent++) { + desc = (const void *)ent->driver_data; + if (desc->platform == data->platform) + break; + } + + if (!ent->device) + return -ENODEV; + + for (subplatform_desc = desc->subplatforms; + subplatform_desc && subplatform_desc->subplatform; + subplatform_desc++) + if (subplatform_desc->subplatform == data->subplatform) + break; + + if (data->subplatform != XE_SUBPLATFORM_NONE && !subplatform_desc) + return -ENODEV; + +done: + kunit_activate_static_stub(test, read_gmdid, fake_read_gmdid); + + xe_info_init_early(xe, desc, subplatform_desc); + xe_info_init(xe, desc->graphics, desc->media); + + return 0; +} +EXPORT_SYMBOL_IF_KUNIT(xe_pci_fake_device_init); diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.c b/drivers/gpu/drm/xe/tests/xe_pci_test.c new file mode 100644 index 0000000000..171e4180f1 --- /dev/null +++ 
b/drivers/gpu/drm/xe/tests/xe_pci_test.c @@ -0,0 +1,71 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2023 Intel Corporation + */ + +#include <drm/drm_drv.h> +#include <drm/drm_kunit_helpers.h> + +#include <kunit/test.h> + +#include "tests/xe_test.h" + +#include "xe_device.h" +#include "xe_pci_test.h" +#include "xe_pci_types.h" + +static void check_graphics_ip(const struct xe_graphics_desc *graphics) +{ + struct kunit *test = xe_cur_kunit(); + u64 mask = graphics->hw_engine_mask; + + /* RCS, CCS, and BCS engines are allowed on the graphics IP */ + mask &= ~(XE_HW_ENGINE_RCS_MASK | + XE_HW_ENGINE_CCS_MASK | + XE_HW_ENGINE_BCS_MASK); + + /* Any remaining engines are an error */ + KUNIT_ASSERT_EQ(test, mask, 0); +} + +static void check_media_ip(const struct xe_media_desc *media) +{ + struct kunit *test = xe_cur_kunit(); + u64 mask = media->hw_engine_mask; + + /* VCS, VECS and GSCCS engines are allowed on the media IP */ + mask &= ~(XE_HW_ENGINE_VCS_MASK | + XE_HW_ENGINE_VECS_MASK | + XE_HW_ENGINE_GSCCS_MASK); + + /* Any remaining engines are an error */ + KUNIT_ASSERT_EQ(test, mask, 0); +} + +static void xe_gmdid_graphics_ip(struct kunit *test) +{ + xe_call_for_each_graphics_ip(check_graphics_ip); +} + +static void xe_gmdid_media_ip(struct kunit *test) +{ + xe_call_for_each_media_ip(check_media_ip); +} + +static struct kunit_case xe_pci_tests[] = { + KUNIT_CASE(xe_gmdid_graphics_ip), + KUNIT_CASE(xe_gmdid_media_ip), + {} +}; + +static struct kunit_suite xe_pci_test_suite = { + .name = "xe_pci", + .test_cases = xe_pci_tests, +}; + +kunit_test_suite(xe_pci_test_suite); + +MODULE_AUTHOR("Intel Corporation"); +MODULE_LICENSE("GPL"); +MODULE_DESCRIPTION("xe_pci kunit test"); +MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING); diff --git a/drivers/gpu/drm/xe/tests/xe_pci_test.h b/drivers/gpu/drm/xe/tests/xe_pci_test.h new file mode 100644 index 0000000000..811ffe5bd9 --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_pci_test.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0 AND MIT */ +/* + * Copyright © 2023 Intel Corporation + */ + +#ifndef _XE_PCI_TEST_H_ +#define _XE_PCI_TEST_H_ + +#include <linux/types.h> + +#include "xe_platform_types.h" + +struct xe_device; +struct xe_graphics_desc; +struct xe_media_desc; + +typedef int (*xe_device_fn)(struct xe_device *); +typedef void (*xe_graphics_fn)(const struct xe_graphics_desc *); +typedef void (*xe_media_fn)(const struct xe_media_desc *); + +int xe_call_for_each_device(xe_device_fn xe_fn); +void xe_call_for_each_graphics_ip(xe_graphics_fn xe_fn); +void xe_call_for_each_media_ip(xe_media_fn xe_fn); + +struct xe_pci_fake_data { + enum xe_platform platform; + enum xe_subplatform subplatform; + u32 graphics_verx100; + u32 media_verx100; + u32 graphics_step; + u32 media_step; +}; + +int xe_pci_fake_device_init(struct xe_device *xe); + +#endif diff --git a/drivers/gpu/drm/xe/tests/xe_rtp_test.c b/drivers/gpu/drm/xe/tests/xe_rtp_test.c new file mode 100644 index 0000000000..4a69728976 --- /dev/null +++ b/drivers/gpu/drm/xe/tests/xe_rtp_test.c @@ -0,0 +1,319 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright © 2023 Intel Corporation + */ + +#include <linux/string.h> +#include <linux/xarray.h> + +#include <drm/drm_drv.h> +#include <drm/drm_kunit_helpers.h> + +#include <kunit/test.h> + +#include "regs/xe_gt_regs.h" +#include "regs/xe_reg_defs.h" +#include "xe_device.h" +#include "xe_device_types.h" +#include "xe_pci_test.h" +#include "xe_reg_sr.h" +#include "xe_rtp.h" + +#define REGULAR_REG1 XE_REG(1) +#define REGULAR_REG2 XE_REG(2) +#define 
REGULAR_REG3 XE_REG(3) +#define MCR_REG1 XE_REG_MCR(1) +#define MCR_REG2 XE_REG_MCR(2) +#define MCR_REG3 XE_REG_MCR(3) +#define MASKED_REG1 XE_REG(1, XE_REG_OPTION_MASKED) + +#undef XE_REG_MCR +#define XE_REG_MCR(...) XE_REG(__VA_ARGS__, .mcr = 1) + +struct rtp_test_case { + const char *name; + struct xe_reg expected_reg; + u32 expected_set_bits; + u32 expected_clr_bits; + unsigned long expected_count; + unsigned int expected_sr_errors; + const struct xe_rtp_entry_sr *entries; +}; + +static bool match_yes(const struct xe_gt *gt, const struct xe_hw_engine *hwe) +{ + return true; +} + +static bool match_no(const struct xe_gt *gt, const struct xe_hw_engine *hwe) +{ + return false; +} + +static const struct rtp_test_case cases[] = { + { + .name = "coalesce-same-reg", + .expected_reg = REGULAR_REG1, + .expected_set_bits = REG_BIT(0) | REG_BIT(1), + .expected_clr_bits = REG_BIT(0) | REG_BIT(1), + .expected_count = 1, + /* Different bits on the same register: create a single entry */ + .entries = (const struct xe_rtp_entry_sr[]) { + { XE_RTP_NAME("basic-1"), + XE_RTP_RULES(FUNC(match_yes)), + XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0))) + }, + { XE_RTP_NAME("basic-2"), + XE_RTP_RULES(FUNC(match_yes)), + XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(1))) + }, + {} + }, + }, + { + .name = "no-match-no-add", + .expected_reg = REGULAR_REG1, + .expected_set_bits = REG_BIT(0), + .expected_clr_bits = REG_BIT(0), + .expected_count = 1, + /* Don't coalesce second entry since rules don't match */ + .entries = (const struct xe_rtp_entry_sr[]) { + { XE_RTP_NAME("basic-1"), + XE_RTP_RULES(FUNC(match_yes)), + XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0))) + }, + { XE_RTP_NAME("basic-2"), + XE_RTP_RULES(FUNC(match_no)), + XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(1))) + }, + {} + }, + }, + { + .name = "no-match-no-add-multiple-rules", + .expected_reg = REGULAR_REG1, + .expected_set_bits = REG_BIT(0), + .expected_clr_bits = REG_BIT(0), + .expected_count = 1, + /* Don't coalesce second entry due to one of the rules */ + .entries = (const struct xe_rtp_entry_sr[]) { + { XE_RTP_NAME("basic-1"), + XE_RTP_RULES(FUNC(match_yes)), + XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0))) + }, + { XE_RTP_NAME("basic-2"), + XE_RTP_RULES(FUNC(match_yes), FUNC(match_no)), + XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(1))) + }, + {} + }, + }, + { + .name = "two-regs-two-entries", + .expected_reg = REGULAR_REG1, + .expected_set_bits = REG_BIT(0), + .expected_clr_bits = REG_BIT(0), + .expected_count = 2, + /* Same bits on different registers are not coalesced */ + .entries = (const struct xe_rtp_entry_sr[]) { + { XE_RTP_NAME("basic-1"), + XE_RTP_RULES(FUNC(match_yes)), + XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0))) + }, + { XE_RTP_NAME("basic-2"), + XE_RTP_RULES(FUNC(match_yes)), + XE_RTP_ACTIONS(SET(REGULAR_REG2, REG_BIT(0))) + }, + {} + }, + }, + { + .name = "clr-one-set-other", + .expected_reg = REGULAR_REG1, + .expected_set_bits = REG_BIT(0), + .expected_clr_bits = REG_BIT(1) | REG_BIT(0), + .expected_count = 1, + /* Check clr vs set actions on different bits */ + .entries = (const struct xe_rtp_entry_sr[]) { + { XE_RTP_NAME("basic-1"), + XE_RTP_RULES(FUNC(match_yes)), + XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0))) + }, + { XE_RTP_NAME("basic-2"), + XE_RTP_RULES(FUNC(match_yes)), + XE_RTP_ACTIONS(CLR(REGULAR_REG1, REG_BIT(1))) + }, + {} + }, + }, + { +#define TEMP_MASK REG_GENMASK(10, 8) +#define TEMP_FIELD REG_FIELD_PREP(TEMP_MASK, 2) + .name = "set-field", + .expected_reg = REGULAR_REG1, + .expected_set_bits = TEMP_FIELD, + 
.expected_clr_bits = TEMP_MASK, + .expected_count = 1, + /* Check FIELD_SET works */ + .entries = (const struct xe_rtp_entry_sr[]) { + { XE_RTP_NAME("basic-1"), + XE_RTP_RULES(FUNC(match_yes)), + XE_RTP_ACTIONS(FIELD_SET(REGULAR_REG1, + TEMP_MASK, TEMP_FIELD)) + }, + {} + }, +#undef TEMP_MASK +#undef TEMP_FIELD + }, + { + .name = "conflict-duplicate", + .expected_reg = REGULAR_REG1, + .expected_set_bits = REG_BIT(0), + .expected_clr_bits = REG_BIT(0), + .expected_count = 1, + .expected_sr_errors = 1, + .entries = (const struct xe_rtp_entry_sr[]) { + { XE_RTP_NAME("basic-1"), + XE_RTP_RULES(FUNC(match_yes)), + XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0))) + }, + /* drop: setting same values twice */ + { XE_RTP_NAME("basic-2"), + XE_RTP_RULES(FUNC(match_yes)), + XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0))) + }, + {} + }, + }, + { + .name = "conflict-not-disjoint", + .expected_reg = REGULAR_REG1, + .expected_set_bits = REG_BIT(0), + .expected_clr_bits = REG_BIT(0), + .expected_count = 1, + .expected_sr_errors = 1, + .entries = (const struct xe_rtp_entry_sr[]) { + { XE_RTP_NAME("basic-1"), + XE_RTP_RULES(FUNC(match_yes)), + XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0))) + }, + /* drop: bits are not disjoint with previous entries */ + { XE_RTP_NAME("basic-2"), + XE_RTP_RULES(FUNC(match_yes)), + XE_RTP_ACTIONS(CLR(REGULAR_REG1, REG_GENMASK(1, 0))) + }, + {} + }, + }, + { + .name = "conflict-reg-type", + .expected_reg = REGULAR_REG1, + .expected_set_bits = REG_BIT(0), + .expected_clr_bits = REG_BIT(0), + .expected_count = 1, + .expected_sr_errors = 2, + .entries = (const struct xe_rtp_entry_sr[]) { + { XE_RTP_NAME("basic-1"), + XE_RTP_RULES(FUNC(match_yes)), + XE_RTP_ACTIONS(SET(REGULAR_REG1, REG_BIT(0))) + }, + /* drop: regular vs MCR */ + { XE_RTP_NAME("basic-2"), + XE_RTP_RULES(FUNC(match_yes)), + XE_RTP_ACTIONS(SET(MCR_REG1, REG_BIT(1))) + }, + /* drop: regular vs masked */ + { XE_RTP_NAME("basic-3"), + XE_RTP_RULES(FUNC(match_yes)), + XE_RTP_ACTIONS(SET(MASKED_REG1, REG_BIT(0))) + }, + {} + }, + }, +}; + +static void xe_rtp_process_tests(struct kunit *test) +{ + const struct rtp_test_case *param = test->param_value; + struct xe_device *xe = test->priv; + struct xe_gt *gt = xe_device_get_root_tile(xe)->primary_gt; + struct xe_reg_sr *reg_sr = >->reg_sr; + const struct xe_reg_sr_entry *sre, *sr_entry = NULL; + struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt); + unsigned long idx, count = 0; + + xe_reg_sr_init(reg_sr, "xe_rtp_tests", xe); + xe_rtp_process_to_sr(&ctx, param->entries, reg_sr); + + xa_for_each(®_sr->xa, idx, sre) { + if (idx == param->expected_reg.addr) + sr_entry = sre; + + count++; + } + + KUNIT_EXPECT_EQ(test, count, param->expected_count); + KUNIT_EXPECT_EQ(test, sr_entry->clr_bits, param->expected_clr_bits); + KUNIT_EXPECT_EQ(test, sr_entry->set_bits, param->expected_set_bits); + KUNIT_EXPECT_EQ(test, sr_entry->reg.raw, param->expected_reg.raw); + KUNIT_EXPECT_EQ(test, reg_sr->errors, param->expected_sr_errors); +} + +static void rtp_desc(const struct rtp_test_case *t, char *desc) +{ + strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE); +} + +KUNIT_ARRAY_PARAM(rtp, cases, rtp_desc); + +static int xe_rtp_test_init(struct kunit *test) +{ + struct xe_device *xe; + struct device *dev; + int ret; + + dev = drm_kunit_helper_alloc_device(test); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev); + + xe = drm_kunit_helper_alloc_drm_device(test, dev, + struct xe_device, + drm, DRIVER_GEM); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe); + + /* Initialize an empty device */ + test->priv 
+static void xe_rtp_process_tests(struct kunit *test)
+{
+	const struct rtp_test_case *param = test->param_value;
+	struct xe_device *xe = test->priv;
+	struct xe_gt *gt = xe_device_get_root_tile(xe)->primary_gt;
+	struct xe_reg_sr *reg_sr = &gt->reg_sr;
+	const struct xe_reg_sr_entry *sre, *sr_entry = NULL;
+	struct xe_rtp_process_ctx ctx = XE_RTP_PROCESS_CTX_INITIALIZER(gt);
+	unsigned long idx, count = 0;
+
+	xe_reg_sr_init(reg_sr, "xe_rtp_tests", xe);
+	xe_rtp_process_to_sr(&ctx, param->entries, reg_sr);
+
+	xa_for_each(&reg_sr->xa, idx, sre) {
+		if (idx == param->expected_reg.addr)
+			sr_entry = sre;
+
+		count++;
+	}
+
+	KUNIT_EXPECT_EQ(test, count, param->expected_count);
+	KUNIT_EXPECT_EQ(test, sr_entry->clr_bits, param->expected_clr_bits);
+	KUNIT_EXPECT_EQ(test, sr_entry->set_bits, param->expected_set_bits);
+	KUNIT_EXPECT_EQ(test, sr_entry->reg.raw, param->expected_reg.raw);
+	KUNIT_EXPECT_EQ(test, reg_sr->errors, param->expected_sr_errors);
+}
+
+static void rtp_desc(const struct rtp_test_case *t, char *desc)
+{
+	strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(rtp, cases, rtp_desc);
+
+static int xe_rtp_test_init(struct kunit *test)
+{
+	struct xe_device *xe;
+	struct device *dev;
+	int ret;
+
+	dev = drm_kunit_helper_alloc_device(test);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+	xe = drm_kunit_helper_alloc_drm_device(test, dev,
+					       struct xe_device,
+					       drm, DRIVER_GEM);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
+
+	/* Initialize an empty device */
+	test->priv = NULL;
+	ret = xe_pci_fake_device_init(xe);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	xe->drm.dev = dev;
+	test->priv = xe;
+
+	return 0;
+}
+
+static void xe_rtp_test_exit(struct kunit *test)
+{
+	struct xe_device *xe = test->priv;
+
+	drm_kunit_helper_free_device(test, xe->drm.dev);
+}
+
+static struct kunit_case xe_rtp_tests[] = {
+	KUNIT_CASE_PARAM(xe_rtp_process_tests, rtp_gen_params),
+	{}
+};
+
+static struct kunit_suite xe_rtp_test_suite = {
+	.name = "xe_rtp",
+	.init = xe_rtp_test_init,
+	.exit = xe_rtp_test_exit,
+	.test_cases = xe_rtp_tests,
+};
+
+kunit_test_suite(xe_rtp_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe_rtp kunit test");
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);
diff --git a/drivers/gpu/drm/xe/tests/xe_test.h b/drivers/gpu/drm/xe/tests/xe_test.h
new file mode 100644
index 0000000000..7a1ae213e7
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_test.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 AND MIT */
+/*
+ * Copyright © 2022 Intel Corporation
+ */
+
+#ifndef _XE_TEST_H_
+#define _XE_TEST_H_
+
+#include <linux/types.h>
+
+#if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST)
+#include <linux/sched.h>
+#include <kunit/test.h>
+
+/*
+ * Each test that provides a kunit private test structure, place a test id
+ * here and point the kunit->priv to an embedded struct xe_test_priv.
+ */
+enum xe_test_priv_id {
+	XE_TEST_LIVE_DMA_BUF,
+	XE_TEST_LIVE_MIGRATE,
+};
+
+/**
+ * struct xe_test_priv - Base class for test private info
+ * @id: enum xe_test_priv_id to identify the subclass.
+ */
+struct xe_test_priv {
+	enum xe_test_priv_id id;
+};
+
+#define XE_TEST_DECLARE(x) x
+#define XE_TEST_ONLY(x) unlikely(x)
+#define XE_TEST_EXPORT
+#define xe_cur_kunit() current->kunit_test
+
+/**
+ * xe_cur_kunit_priv - Obtain the struct xe_test_priv pointed to by
+ * current->kunit->priv if it exists and is embedded in the expected subclass.
+ * @id: Id of the expected subclass.
+ *
+ * Return: NULL if the process is not a kunit test, and NULL if the
+ * current kunit->priv pointer is not pointing to an object of the expected
+ * subclass. A pointer to the embedded struct xe_test_priv otherwise.
+ */
+static inline struct xe_test_priv *
+xe_cur_kunit_priv(enum xe_test_priv_id id)
+{
+	struct xe_test_priv *priv;
+
+	if (!xe_cur_kunit())
+		return NULL;
+
+	priv = xe_cur_kunit()->priv;
+	return priv->id == id ? priv : NULL;
+}
+
+#else /* if IS_ENABLED(CONFIG_DRM_XE_KUNIT_TEST) */
+
+#define XE_TEST_DECLARE(x)
+#define XE_TEST_ONLY(x) 0
+#define XE_TEST_EXPORT static
+#define xe_cur_kunit() NULL
+#define xe_cur_kunit_priv(_id) NULL
+
+#endif
+#endif
diff --git a/drivers/gpu/drm/xe/tests/xe_wa_test.c b/drivers/gpu/drm/xe/tests/xe_wa_test.c
new file mode 100644
index 0000000000..b4715b78ef
--- /dev/null
+++ b/drivers/gpu/drm/xe/tests/xe_wa_test.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright © 2023 Intel Corporation
+ */
+
+#include <drm/drm_drv.h>
+#include <drm/drm_kunit_helpers.h>
+
+#include <kunit/test.h>
+
+#include "xe_device.h"
+#include "xe_pci_test.h"
+#include "xe_reg_sr.h"
+#include "xe_tuning.h"
+#include "xe_wa.h"
+
+struct platform_test_case {
+	const char *name;
+	enum xe_platform platform;
+	enum xe_subplatform subplatform;
+	u32 graphics_verx100;
+	u32 media_verx100;
+	struct xe_step_info step;
+};
+
+#define PLATFORM_CASE(platform__, graphics_step__)				\
+	{									\
+		.name = #platform__ " (" #graphics_step__ ")",			\
+		.platform = XE_ ## platform__,					\
+		.subplatform = XE_SUBPLATFORM_NONE,				\
+		.step = { .graphics = STEP_ ## graphics_step__ }		\
+	}
+
+
+#define SUBPLATFORM_CASE(platform__, subplatform__, graphics_step__)		\
+	{									\
+		.name = #platform__ "_" #subplatform__ " (" #graphics_step__ ")",	\
+		.platform = XE_ ## platform__,					\
+		.subplatform = XE_SUBPLATFORM_ ## platform__ ## _ ## subplatform__,	\
+		.step = { .graphics = STEP_ ## graphics_step__ }		\
+	}
+
+#define GMDID_CASE(platform__, graphics_verx100__, graphics_step__,		\
+		   media_verx100__, media_step__)				\
+	{									\
+		.name = #platform__ " (g:" #graphics_step__ ", m:" #media_step__ ")",\
+		.platform = XE_ ## platform__,					\
+		.subplatform = XE_SUBPLATFORM_NONE,				\
+		.graphics_verx100 = graphics_verx100__,				\
+		.media_verx100 = media_verx100__,				\
+		.step = { .graphics = STEP_ ## graphics_step__,			\
+			  .media = STEP_ ## media_step__ }			\
+	}
+
+static const struct platform_test_case cases[] = {
+	PLATFORM_CASE(TIGERLAKE, B0),
+	PLATFORM_CASE(DG1, A0),
+	PLATFORM_CASE(DG1, B0),
+	PLATFORM_CASE(ALDERLAKE_S, A0),
+	PLATFORM_CASE(ALDERLAKE_S, B0),
+	PLATFORM_CASE(ALDERLAKE_S, C0),
+	PLATFORM_CASE(ALDERLAKE_S, D0),
+	PLATFORM_CASE(ALDERLAKE_P, A0),
+	PLATFORM_CASE(ALDERLAKE_P, B0),
+	PLATFORM_CASE(ALDERLAKE_P, C0),
+	SUBPLATFORM_CASE(ALDERLAKE_S, RPLS, D0),
+	SUBPLATFORM_CASE(ALDERLAKE_P, RPLU, E0),
+	SUBPLATFORM_CASE(DG2, G10, A0),
+	SUBPLATFORM_CASE(DG2, G10, A1),
+	SUBPLATFORM_CASE(DG2, G10, B0),
+	SUBPLATFORM_CASE(DG2, G10, C0),
+	SUBPLATFORM_CASE(DG2, G11, A0),
+	SUBPLATFORM_CASE(DG2, G11, B0),
+	SUBPLATFORM_CASE(DG2, G11, B1),
+	SUBPLATFORM_CASE(DG2, G12, A0),
+	SUBPLATFORM_CASE(DG2, G12, A1),
+	GMDID_CASE(METEORLAKE, 1270, A0, 1300, A0),
+	GMDID_CASE(METEORLAKE, 1271, A0, 1300, A0),
+	GMDID_CASE(LUNARLAKE, 2004, A0, 2000, A0),
+	GMDID_CASE(LUNARLAKE, 2004, B0, 2000, A0),
+};
+
+static void platform_desc(const struct platform_test_case *t, char *desc)
+{
+	strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(platform, cases, platform_desc);
+
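+/*
+ * Build a fake xe device matching the parametrized platform, subplatform,
+ * IP versions and stepping, so the workarounds for that configuration can
+ * be processed without real hardware.
+ */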
+static int xe_wa_test_init(struct kunit *test)
+{
+	const struct platform_test_case *param = test->param_value;
+	struct xe_pci_fake_data data = {
+		.platform = param->platform,
+		.subplatform = param->subplatform,
+		.graphics_verx100 = param->graphics_verx100,
+		.media_verx100 = param->media_verx100,
+		.graphics_step = param->step.graphics,
+		.media_step = param->step.media,
+	};
+	struct xe_device *xe;
+	struct device *dev;
+	int ret;
+
+	dev = drm_kunit_helper_alloc_device(test);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, dev);
+
+	xe = drm_kunit_helper_alloc_drm_device(test, dev,
+					       struct xe_device,
+					       drm, DRIVER_GEM);
+	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xe);
+
+	test->priv = &data;
+	ret = xe_pci_fake_device_init(xe);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	if (!param->graphics_verx100)
+		xe->info.step = param->step;
+
+	/* TODO: init hw engines for engine/LRC WAs */
+	xe->drm.dev = dev;
+	test->priv = xe;
+
+	return 0;
+}
+
+static void xe_wa_test_exit(struct kunit *test)
+{
+	struct xe_device *xe = test->priv;
+
+	drm_kunit_helper_free_device(test, xe->drm.dev);
+}
+
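+/*
+ * For each GT of the fake device, process workarounds and tunings into a
+ * freshly initialized save/restore list and verify that no errors were
+ * recorded while building it.
+ */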
+static void xe_wa_gt(struct kunit *test)
+{
+	struct xe_device *xe = test->priv;
+	struct xe_gt *gt;
+	int id;
+
+	for_each_gt(gt, xe, id) {
+		xe_reg_sr_init(&gt->reg_sr, "GT", xe);
+
+		xe_wa_process_gt(gt);
+		xe_tuning_process_gt(gt);
+
+		KUNIT_ASSERT_EQ(test, gt->reg_sr.errors, 0);
+	}
+}
+
+static struct kunit_case xe_wa_tests[] = {
+	KUNIT_CASE_PARAM(xe_wa_gt, platform_gen_params),
+	{}
+};
+
+static struct kunit_suite xe_rtp_test_suite = {
+	.name = "xe_wa",
+	.init = xe_wa_test_init,
+	.exit = xe_wa_test_exit,
+	.test_cases = xe_wa_tests,
+};
+
+kunit_test_suite(xe_rtp_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("xe_wa kunit test");
+MODULE_IMPORT_NS(EXPORTED_FOR_KUNIT_TESTING);