path: root/drivers/gpu/drm/tests
author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/gpu/drm/tests
parent     Initial commit. (diff)
Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/gpu/drm/tests')
-rw-r--r--   drivers/gpu/drm/tests/.kunitconfig                  3
-rw-r--r--   drivers/gpu/drm/tests/Makefile                      5
-rw-r--r--   drivers/gpu/drm/tests/drm_buddy_test.c            757
-rw-r--r--   drivers/gpu/drm/tests/drm_cmdline_parser_test.c   991
-rw-r--r--   drivers/gpu/drm/tests/drm_damage_helper_test.c    639
-rw-r--r--   drivers/gpu/drm/tests/drm_dp_mst_helper_test.c    286
-rw-r--r--   drivers/gpu/drm/tests/drm_format_helper_test.c    463
-rw-r--r--   drivers/gpu/drm/tests/drm_format_test.c           359
-rw-r--r--   drivers/gpu/drm/tests/drm_framebuffer_test.c      382
-rw-r--r--   drivers/gpu/drm/tests/drm_mm_test.c              2256
-rw-r--r--   drivers/gpu/drm/tests/drm_plane_helper_test.c     237
-rw-r--r--   drivers/gpu/drm/tests/drm_rect_test.c             214
12 files changed, 6592 insertions, 0 deletions
diff --git a/drivers/gpu/drm/tests/.kunitconfig b/drivers/gpu/drm/tests/.kunitconfig
new file mode 100644
index 000000000..6ec04b4c9
--- /dev/null
+++ b/drivers/gpu/drm/tests/.kunitconfig
@@ -0,0 +1,3 @@
+CONFIG_KUNIT=y
+CONFIG_DRM=y
+CONFIG_DRM_KUNIT_TEST=y
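Note: the .kunitconfig above is what lets the KUnit wrapper build and run just these suites; assuming the usual in-tree tooling, an invocation along the lines of ./tools/testing/kunit/kunit.py run --kunitconfig=drivers/gpu/drm/tests should build a UML kernel with these options and execute every test added in this commit.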
diff --git a/drivers/gpu/drm/tests/Makefile b/drivers/gpu/drm/tests/Makefile
new file mode 100644
index 000000000..91b70f7d2
--- /dev/null
+++ b/drivers/gpu/drm/tests/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+
+obj-$(CONFIG_DRM_KUNIT_TEST) += drm_format_helper_test.o drm_damage_helper_test.o \
+ drm_cmdline_parser_test.o drm_rect_test.o drm_format_test.o drm_plane_helper_test.o \
+ drm_dp_mst_helper_test.o drm_framebuffer_test.o drm_buddy_test.o drm_mm_test.o
diff --git a/drivers/gpu/drm/tests/drm_buddy_test.c b/drivers/gpu/drm/tests/drm_buddy_test.c
new file mode 100644
index 000000000..a699fc0dc
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_buddy_test.c
@@ -0,0 +1,757 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ * Copyright © 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <linux/prime_numbers.h>
+#include <linux/sched/signal.h>
+
+#include <drm/drm_buddy.h>
+
+#include "../lib/drm_random.h"
+
+#define TIMEOUT(name__) \
+ unsigned long name__ = jiffies + MAX_SCHEDULE_TIMEOUT
+
+static unsigned int random_seed;
+
+static inline u64 get_size(int order, u64 chunk_size)
+{
+ return (1 << order) * chunk_size;
+}
+
+__printf(2, 3)
+static bool __timeout(unsigned long timeout, const char *fmt, ...)
+{
+ va_list va;
+
+ if (!signal_pending(current)) {
+ cond_resched();
+ if (time_before(jiffies, timeout))
+ return false;
+ }
+
+ if (fmt) {
+ va_start(va, fmt);
+ vprintk(fmt, va);
+ va_end(va);
+ }
+
+ return true;
+}
+
+static void __dump_block(struct kunit *test, struct drm_buddy *mm,
+ struct drm_buddy_block *block, bool buddy)
+{
+ kunit_err(test, "block info: header=%llx, state=%u, order=%d, offset=%llx size=%llx root=%d buddy=%d\n",
+ block->header, drm_buddy_block_state(block),
+ drm_buddy_block_order(block), drm_buddy_block_offset(block),
+ drm_buddy_block_size(mm, block), !block->parent, buddy);
+}
+
+static void dump_block(struct kunit *test, struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ struct drm_buddy_block *buddy;
+
+ __dump_block(test, mm, block, false);
+
+ buddy = drm_get_buddy(block);
+ if (buddy)
+ __dump_block(test, mm, buddy, true);
+}
+
+static int check_block(struct kunit *test, struct drm_buddy *mm,
+ struct drm_buddy_block *block)
+{
+ struct drm_buddy_block *buddy;
+ unsigned int block_state;
+ u64 block_size;
+ u64 offset;
+ int err = 0;
+
+ block_state = drm_buddy_block_state(block);
+
+ if (block_state != DRM_BUDDY_ALLOCATED &&
+ block_state != DRM_BUDDY_FREE && block_state != DRM_BUDDY_SPLIT) {
+ kunit_err(test, "block state mismatch\n");
+ err = -EINVAL;
+ }
+
+ block_size = drm_buddy_block_size(mm, block);
+ offset = drm_buddy_block_offset(block);
+
+ if (block_size < mm->chunk_size) {
+ kunit_err(test, "block size smaller than min size\n");
+ err = -EINVAL;
+ }
+
+ /* We can't use is_power_of_2() for a u64 on 32-bit systems. */
+ if (block_size & (block_size - 1)) {
+ kunit_err(test, "block size not power of two\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(block_size, mm->chunk_size)) {
+ kunit_err(test, "block size not aligned to min size\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(offset, mm->chunk_size)) {
+ kunit_err(test, "block offset not aligned to min size\n");
+ err = -EINVAL;
+ }
+
+ if (!IS_ALIGNED(offset, block_size)) {
+ kunit_err(test, "block offset not aligned to block size\n");
+ err = -EINVAL;
+ }
+
+ buddy = drm_get_buddy(block);
+
+ if (!buddy && block->parent) {
+ kunit_err(test, "buddy has gone fishing\n");
+ err = -EINVAL;
+ }
+
+ if (buddy) {
+ if (drm_buddy_block_offset(buddy) != (offset ^ block_size)) {
+ kunit_err(test, "buddy has wrong offset\n");
+ err = -EINVAL;
+ }
+
+ if (drm_buddy_block_size(mm, buddy) != block_size) {
+ kunit_err(test, "buddy size mismatch\n");
+ err = -EINVAL;
+ }
+
+ if (drm_buddy_block_state(buddy) == block_state &&
+ block_state == DRM_BUDDY_FREE) {
+ kunit_err(test, "block and its buddy are free\n");
+ err = -EINVAL;
+ }
+ }
+
+ return err;
+}
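Worked example for the buddy-offset check above: in a buddy allocator the sibling of a block at offset O with size S sits at O ^ S, so a 0x4000-byte block at offset 0x8000 has its buddy at 0xC000, and one at offset 0xC000 points back to 0x8000 — exactly the relation the (offset ^ block_size) comparison verifies.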
+
+static int check_blocks(struct kunit *test, struct drm_buddy *mm,
+ struct list_head *blocks, u64 expected_size, bool is_contiguous)
+{
+ struct drm_buddy_block *block;
+ struct drm_buddy_block *prev;
+ u64 total;
+ int err = 0;
+
+ block = NULL;
+ prev = NULL;
+ total = 0;
+
+ list_for_each_entry(block, blocks, link) {
+ err = check_block(test, mm, block);
+
+ if (!drm_buddy_block_is_allocated(block)) {
+ kunit_err(test, "block not allocated\n");
+ err = -EINVAL;
+ }
+
+ if (is_contiguous && prev) {
+ u64 prev_block_size;
+ u64 prev_offset;
+ u64 offset;
+
+ prev_offset = drm_buddy_block_offset(prev);
+ prev_block_size = drm_buddy_block_size(mm, prev);
+ offset = drm_buddy_block_offset(block);
+
+ if (offset != (prev_offset + prev_block_size)) {
+ kunit_err(test, "block offset mismatch\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (err)
+ break;
+
+ total += drm_buddy_block_size(mm, block);
+ prev = block;
+ }
+
+ if (!err) {
+ if (total != expected_size) {
+ kunit_err(test, "size mismatch, expected=%llx, found=%llx\n",
+ expected_size, total);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (prev) {
+ kunit_err(test, "prev block, dump:\n");
+ dump_block(test, mm, prev);
+ }
+
+ kunit_err(test, "bad block, dump:\n");
+ dump_block(test, mm, block);
+
+ return err;
+}
+
+static int check_mm(struct kunit *test, struct drm_buddy *mm)
+{
+ struct drm_buddy_block *root;
+ struct drm_buddy_block *prev;
+ unsigned int i;
+ u64 total;
+ int err = 0;
+
+ if (!mm->n_roots) {
+ kunit_err(test, "n_roots is zero\n");
+ return -EINVAL;
+ }
+
+ if (mm->n_roots != hweight64(mm->size)) {
+ kunit_err(test, "n_roots mismatch, n_roots=%u, expected=%lu\n",
+ mm->n_roots, hweight64(mm->size));
+ return -EINVAL;
+ }
+
+ root = NULL;
+ prev = NULL;
+ total = 0;
+
+ for (i = 0; i < mm->n_roots; ++i) {
+ struct drm_buddy_block *block;
+ unsigned int order;
+
+ root = mm->roots[i];
+ if (!root) {
+ kunit_err(test, "root(%u) is NULL\n", i);
+ err = -EINVAL;
+ break;
+ }
+
+ err = check_block(test, mm, root);
+
+ if (!drm_buddy_block_is_free(root)) {
+ kunit_err(test, "root not free\n");
+ err = -EINVAL;
+ }
+
+ order = drm_buddy_block_order(root);
+
+ if (!i) {
+ if (order != mm->max_order) {
+ kunit_err(test, "max order root missing\n");
+ err = -EINVAL;
+ }
+ }
+
+ if (prev) {
+ u64 prev_block_size;
+ u64 prev_offset;
+ u64 offset;
+
+ prev_offset = drm_buddy_block_offset(prev);
+ prev_block_size = drm_buddy_block_size(mm, prev);
+ offset = drm_buddy_block_offset(root);
+
+ if (offset != (prev_offset + prev_block_size)) {
+ kunit_err(test, "root offset mismatch\n");
+ err = -EINVAL;
+ }
+ }
+
+ block = list_first_entry_or_null(&mm->free_list[order],
+ struct drm_buddy_block, link);
+ if (block != root) {
+ kunit_err(test, "root mismatch at order=%u\n", order);
+ err = -EINVAL;
+ }
+
+ if (err)
+ break;
+
+ prev = root;
+ total += drm_buddy_block_size(mm, root);
+ }
+
+ if (!err) {
+ if (total != mm->size) {
+ kunit_err(test, "expected mm size=%llx, found=%llx\n",
+ mm->size, total);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ if (prev) {
+ kunit_err(test, "prev root(%u), dump:\n", i - 1);
+ dump_block(test, mm, prev);
+ }
+
+ if (root) {
+ kunit_err(test, "bad root(%u), dump:\n", i);
+ dump_block(test, mm, root);
+ }
+
+ return err;
+}
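Note on the n_roots check: drm_buddy_init() builds one power-of-two root per set bit of the total size, so an illustrative size of 7 chunks yields roots of 4, 2 and 1 chunks and n_roots == hweight64(size) == 3 (chunk_size being a power of two does not change the popcount).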
+
+static void mm_config(u64 *size, u64 *chunk_size)
+{
+ DRM_RND_STATE(prng, random_seed);
+ u32 s, ms;
+
+ /* Nothing fancy, just try to get an interesting bit pattern */
+
+ prandom_seed_state(&prng, random_seed);
+
+ /* Let size be a random number of pages up to 8 GB (2M pages) */
+ s = 1 + drm_prandom_u32_max_state((BIT(33 - 12)) - 1, &prng);
+ /* Let the chunk size be a random power of 2 less than size */
+ ms = BIT(drm_prandom_u32_max_state(ilog2(s), &prng));
+ /* Round size down to the chunk size */
+ s &= -ms;
+
+ /* Convert from pages to bytes */
+ *chunk_size = (u64)ms << 12;
+ *size = (u64)s << 12;
+}
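A worked run of mm_config() with made-up PRNG values: if s comes out as 1000 pages and the exponent draw gives ms = 64, then s &= -ms rounds s down to the nearest multiple of 64, i.e. 960 pages, so chunk_size becomes 64 << 12 = 256 KiB and size becomes 960 << 12 = 3.75 MiB.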
+
+static void drm_test_buddy_alloc_pathological(struct kunit *test)
+{
+ u64 mm_size, size, start = 0;
+ struct drm_buddy_block *block;
+ const int max_order = 3;
+ unsigned long flags = 0;
+ int order, top;
+ struct drm_buddy mm;
+ LIST_HEAD(blocks);
+ LIST_HEAD(holes);
+ LIST_HEAD(tmp);
+
+ /*
+ * Create a pot-sized mm, then allocate one of each possible
+ * order within. This should leave the mm with exactly one
+ * page left. Free the largest block, then whittle down again.
+ * Eventually we will have a fully 50% fragmented mm.
+ */
+
+ mm_size = PAGE_SIZE << max_order;
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ "buddy_init failed\n");
+
+ KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
+
+ for (top = max_order; top; top--) {
+ /* Make room by freeing the largest allocated block */
+ block = list_first_entry_or_null(&blocks, typeof(*block), link);
+ if (block) {
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+ }
+
+ for (order = top; order--;) {
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start,
+ mm_size, size, size,
+ &tmp, flags),
+ "buddy_alloc hit -ENOMEM with order=%d, top=%d\n",
+ order, top);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &blocks);
+ }
+
+ /* There should be one final page for this sub-allocation */
+ size = get_size(0, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc hit -ENOMEM for hole\n");
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &holes);
+
+ size = get_size(top, PAGE_SIZE);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc unexpectedly succeeded at top-order %d/%d, it should be full!",
+ top, max_order);
+ }
+
+ drm_buddy_free_list(&mm, &holes);
+
+ /* Nothing larger than blocks of chunk_size now available */
+ for (order = 1; order <= max_order; order++) {
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc unexpectedly succeeded at order %d, it should be full!",
+ order);
+ }
+
+ list_splice_tail(&holes, &blocks);
+ drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_fini(&mm);
+}
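Rough walkthrough of the loop above for max_order = 3 (an 8-page mm): the first pass allocates one block each of order 2, 1 and 0 plus a single page parked on the holes list, filling the mm; each later pass frees the largest block still held and refills that space one order lower, again parking one page on holes. After the loop three of the eight pages sit on holes with their buddies still allocated, so once the holes are freed nothing above order 0 can be carved out — the roughly 50% fragmented state the comment describes.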
+
+static void drm_test_buddy_alloc_smoke(struct kunit *test)
+{
+ u64 mm_size, chunk_size, start = 0;
+ unsigned long flags = 0;
+ struct drm_buddy mm;
+ int *order;
+ int i;
+
+ DRM_RND_STATE(prng, random_seed);
+ TIMEOUT(end_time);
+
+ mm_config(&mm_size, &chunk_size);
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, chunk_size),
+ "buddy_init failed\n");
+
+ order = drm_random_order(mm.max_order + 1, &prng);
+ KUNIT_ASSERT_TRUE(test, order);
+
+ for (i = 0; i <= mm.max_order; ++i) {
+ struct drm_buddy_block *block;
+ int max_order = order[i];
+ bool timeout = false;
+ LIST_HEAD(blocks);
+ u64 total, size;
+ LIST_HEAD(tmp);
+ int order, err;
+
+ KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm),
+ "pre-mm check failed, abort\n");
+
+ order = max_order;
+ total = 0;
+
+ do {
+retry:
+ size = get_size(order, chunk_size);
+ err = drm_buddy_alloc_blocks(&mm, start, mm_size, size, size, &tmp, flags);
+ if (err) {
+ if (err == -ENOMEM) {
+ KUNIT_FAIL(test, "buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+ } else {
+ if (order--) {
+ err = 0;
+ goto retry;
+ }
+
+ KUNIT_FAIL(test, "buddy_alloc with order=%d failed\n",
+ order);
+ }
+
+ break;
+ }
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &blocks);
+ KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), order,
+ "buddy_alloc order mismatch\n");
+
+ total += drm_buddy_block_size(&mm, block);
+
+ if (__timeout(end_time, NULL)) {
+ timeout = true;
+ break;
+ }
+ } while (total < mm.size);
+
+ if (!err)
+ err = check_blocks(test, &mm, &blocks, total, false);
+
+ drm_buddy_free_list(&mm, &blocks);
+
+ if (!err) {
+ KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm),
+ "post-mm check failed\n");
+ }
+
+ if (err || timeout)
+ break;
+
+ cond_resched();
+ }
+
+ kfree(order);
+ drm_buddy_fini(&mm);
+}
+
+static void drm_test_buddy_alloc_pessimistic(struct kunit *test)
+{
+ u64 mm_size, size, start = 0;
+ struct drm_buddy_block *block, *bn;
+ const unsigned int max_order = 16;
+ unsigned long flags = 0;
+ struct drm_buddy mm;
+ unsigned int order;
+ LIST_HEAD(blocks);
+ LIST_HEAD(tmp);
+
+ /*
+ * Create a pot-sized mm, then allocate one of each possible
+ * order within. This should leave the mm with exactly one
+ * page left.
+ */
+
+ mm_size = PAGE_SIZE << max_order;
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ "buddy_init failed\n");
+
+ KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
+
+ for (order = 0; order < max_order; order++) {
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &blocks);
+ }
+
+ /* And now the last remaining block available */
+ size = get_size(0, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc hit -ENOMEM on final alloc\n");
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &blocks);
+
+ /* Should be completely full! */
+ for (order = max_order; order--;) {
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc unexpectedly succeeded, it should be full!");
+ }
+
+ block = list_last_entry(&blocks, typeof(*block), link);
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+
+ /* As we free in increasing size, we make available larger blocks */
+ order = 1;
+ list_for_each_entry_safe(block, bn, &blocks, link) {
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+ order++;
+ }
+
+ /* To confirm, now the whole mm should be available */
+ size = get_size(max_order, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc (realloc) hit -ENOMEM with order=%d\n",
+ max_order);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_del(&block->link);
+ drm_buddy_free_block(&mm, block);
+ drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_fini(&mm);
+}
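Note: the second half of this test leans on buddy coalescing — each block freed in the loop merges with space released on earlier iterations, which is why an allocation exactly one order larger is expected to succeed every time around.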
+
+static void drm_test_buddy_alloc_optimistic(struct kunit *test)
+{
+ u64 mm_size, size, start = 0;
+ struct drm_buddy_block *block;
+ unsigned long flags = 0;
+ const int max_order = 16;
+ struct drm_buddy mm;
+ LIST_HEAD(blocks);
+ LIST_HEAD(tmp);
+ int order;
+
+ /*
+ * Create a mm with one block of each order available, and
+ * try to allocate them all.
+ */
+
+ mm_size = PAGE_SIZE * ((1 << (max_order + 1)) - 1);
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, mm_size, PAGE_SIZE),
+ "buddy_init failed\n");
+
+ KUNIT_EXPECT_EQ(test, mm.max_order, max_order);
+
+ for (order = 0; order <= max_order; order++) {
+ size = get_size(order, PAGE_SIZE);
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc hit -ENOMEM with order=%d\n",
+ order);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_blocks has no blocks\n");
+
+ list_move_tail(&block->link, &blocks);
+ }
+
+ /* Should be completely full! */
+ size = get_size(0, PAGE_SIZE);
+ KUNIT_ASSERT_TRUE_MSG(test, drm_buddy_alloc_blocks(&mm, start, mm_size,
+ size, size, &tmp, flags),
+ "buddy_alloc unexpectedly succeeded, it should be full!");
+
+ drm_buddy_free_list(&mm, &blocks);
+ drm_buddy_fini(&mm);
+}
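Note on the size above: PAGE_SIZE * ((1 << (max_order + 1)) - 1) is the geometric sum 2^0 + 2^1 + ... + 2^max_order pages, so every bit of the resulting size is set and drm_buddy_init() carves it into exactly one root block per order — the "one block of each order" layout the comment asks for.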
+
+static void drm_test_buddy_alloc_range(struct kunit *test)
+{
+ unsigned long flags = DRM_BUDDY_RANGE_ALLOCATION;
+ u64 offset, size, rem, chunk_size, end;
+ unsigned long page_num;
+ struct drm_buddy mm;
+ LIST_HEAD(blocks);
+
+ mm_config(&size, &chunk_size);
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_init(&mm, size, chunk_size),
+ "buddy_init failed");
+
+ KUNIT_ASSERT_FALSE_MSG(test, check_mm(test, &mm),
+ "pre-mm check failed, abort!");
+
+ rem = mm.size;
+ offset = 0;
+
+ for_each_prime_number_from(page_num, 1, ULONG_MAX - 1) {
+ struct drm_buddy_block *block;
+ LIST_HEAD(tmp);
+
+ size = min(page_num * mm.chunk_size, rem);
+ end = offset + size;
+
+ KUNIT_ASSERT_FALSE_MSG(test, drm_buddy_alloc_blocks(&mm, offset, end,
+ size, mm.chunk_size,
+ &tmp, flags),
+ "alloc_range with offset=%llx, size=%llx failed\n", offset, size);
+
+ block = list_first_entry_or_null(&tmp, struct drm_buddy_block, link);
+ KUNIT_ASSERT_TRUE_MSG(test, block, "alloc_range has no blocks\n");
+
+ KUNIT_ASSERT_EQ_MSG(test, drm_buddy_block_offset(block), offset,
+ "alloc_range start offset mismatch, found=%llx, expected=%llx\n",
+ drm_buddy_block_offset(block), offset);
+
+ KUNIT_ASSERT_FALSE(test, check_blocks(test, &mm, &tmp, size, true));
+
+ list_splice_tail(&tmp, &blocks);
+
+ offset += size;
+
+ rem -= size;
+ if (!rem)
+ break;
+
+ cond_resched();
+ }
+
+ drm_buddy_free_list(&mm, &blocks);
+
+ KUNIT_EXPECT_FALSE_MSG(test, check_mm(test, &mm), "post-mm check failed\n");
+
+ drm_buddy_fini(&mm);
+}
+
+static void drm_test_buddy_alloc_limit(struct kunit *test)
+{
+ u64 size = U64_MAX, start = 0;
+ struct drm_buddy_block *block;
+ unsigned long flags = 0;
+ LIST_HEAD(allocated);
+ struct drm_buddy mm;
+
+ KUNIT_EXPECT_FALSE(test, drm_buddy_init(&mm, size, PAGE_SIZE));
+
+ KUNIT_EXPECT_EQ_MSG(test, mm.max_order, DRM_BUDDY_MAX_ORDER,
+ "mm.max_order(%d) != %d\n", mm.max_order,
+ DRM_BUDDY_MAX_ORDER);
+
+ size = mm.chunk_size << mm.max_order;
+ KUNIT_EXPECT_FALSE(test, drm_buddy_alloc_blocks(&mm, start, size, size,
+ PAGE_SIZE, &allocated, flags));
+
+ block = list_first_entry_or_null(&allocated, struct drm_buddy_block, link);
+ KUNIT_EXPECT_TRUE(test, block);
+
+ KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_order(block), mm.max_order,
+ "block order(%d) != %d\n",
+ drm_buddy_block_order(block), mm.max_order);
+
+ KUNIT_EXPECT_EQ_MSG(test, drm_buddy_block_size(&mm, block),
+ BIT_ULL(mm.max_order) * PAGE_SIZE,
+ "block size(%llu) != %llu\n",
+ drm_buddy_block_size(&mm, block),
+ BIT_ULL(mm.max_order) * PAGE_SIZE);
+
+ drm_buddy_free_list(&mm, &allocated);
+ drm_buddy_fini(&mm);
+}
+
+static int drm_buddy_init_test(struct kunit *test)
+{
+ while (!random_seed)
+ random_seed = get_random_u32();
+
+ return 0;
+}
+
+static struct kunit_case drm_buddy_tests[] = {
+ KUNIT_CASE(drm_test_buddy_alloc_limit),
+ KUNIT_CASE(drm_test_buddy_alloc_range),
+ KUNIT_CASE(drm_test_buddy_alloc_optimistic),
+ KUNIT_CASE(drm_test_buddy_alloc_pessimistic),
+ KUNIT_CASE(drm_test_buddy_alloc_smoke),
+ KUNIT_CASE(drm_test_buddy_alloc_pathological),
+ {}
+};
+
+static struct kunit_suite drm_buddy_test_suite = {
+ .name = "drm_buddy",
+ .init = drm_buddy_init_test,
+ .test_cases = drm_buddy_tests,
+};
+
+kunit_test_suite(drm_buddy_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_cmdline_parser_test.c b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
new file mode 100644
index 000000000..34790e7a3
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_cmdline_parser_test.c
@@ -0,0 +1,991 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2019 Bootlin
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <drm/drm_connector.h>
+#include <drm/drm_modes.h>
+
+static const struct drm_connector no_connector = {};
+
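For reference, the strings below follow the video= command-line format <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd] plus the DRM extra options (rotate=, reflect_x/reflect_y, margin_*, panel_orientation=): M selects CVT timings, R reduced blanking, i interlace, m margins, e forces the connector on, D forces it on as digital, and d forces it off.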
+static void drm_test_cmdline_force_e_only(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "e";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_force_D_only_not_digital(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "D";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static const struct drm_connector connector_hdmi = {
+ .connector_type = DRM_MODE_CONNECTOR_HDMIB,
+};
+
+static void drm_test_cmdline_force_D_only_hdmi(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "D";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &connector_hdmi, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON_DIGITAL);
+}
+
+static const struct drm_connector connector_dvi = {
+ .connector_type = DRM_MODE_CONNECTOR_DVII,
+};
+
+static void drm_test_cmdline_force_D_only_dvi(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "D";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &connector_dvi, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON_DIGITAL);
+}
+
+static void drm_test_cmdline_force_d_only(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "d";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_OFF);
+}
+
+static void drm_test_cmdline_res(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_vesa(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480M";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_TRUE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_vesa_rblank(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480MR";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_TRUE(test, mode.rb);
+ KUNIT_EXPECT_TRUE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_rblank(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480R";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_TRUE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_bpp(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_refresh(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480@60";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_bpp_refresh(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_interlaced(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60i";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_TRUE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_margins(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60m";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_TRUE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_force_off(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60d";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_OFF);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_force_on(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60e";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_force_on_analog(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60D";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_force_on_digital(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ static const struct drm_connector connector = {
+ .connector_type = DRM_MODE_CONNECTOR_DVII,
+ };
+ const char *cmdline = "720x480-24@60D";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON_DIGITAL);
+}
+
+static void drm_test_cmdline_res_bpp_refresh_interlaced_margins_force_on(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24@60ime";
+
+ KUNIT_EXPECT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_TRUE(test, mode.refresh_specified);
+ KUNIT_EXPECT_EQ(test, mode.refresh, 60);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_TRUE(test, mode.interlace);
+ KUNIT_EXPECT_TRUE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_res_margins_force_on(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480me";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_TRUE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_res_vesa_margins(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480Mm";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_TRUE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_TRUE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_name(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "NTSC";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_STREQ(test, mode.name, "NTSC");
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+}
+
+static void drm_test_cmdline_name_bpp(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "NTSC-24";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_STREQ(test, mode.name, "NTSC");
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+}
+
+static void drm_test_cmdline_name_option(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "NTSC,rotate=180";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_STREQ(test, mode.name, "NTSC");
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
+}
+
+static void drm_test_cmdline_name_bpp_option(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "NTSC-24,rotate=180";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_STREQ(test, mode.name, "NTSC");
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+}
+
+static void drm_test_cmdline_rotate_0(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,rotate=0";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_0);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_rotate_90(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,rotate=90";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_90);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_rotate_180(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,rotate=180";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_rotate_270(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,rotate=270";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_270);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_hmirror(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,reflect_x";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X));
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_vmirror(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,reflect_y";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y));
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_margin_options(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline =
+ "720x480,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.right, 14);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.left, 24);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.bottom, 36);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.top, 42);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_multiple_options(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480,rotate=270,reflect_x";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, (DRM_MODE_ROTATE_270 | DRM_MODE_REFLECT_X));
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_bpp_extra_and_option(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480-24e,rotate=180";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+
+ KUNIT_EXPECT_TRUE(test, mode.bpp_specified);
+ KUNIT_EXPECT_EQ(test, mode.bpp, 24);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_extra_and_option(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "720x480e,rotate=180";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_TRUE(test, mode.specified);
+ KUNIT_EXPECT_EQ(test, mode.xres, 720);
+ KUNIT_EXPECT_EQ(test, mode.yres, 480);
+ KUNIT_EXPECT_EQ(test, mode.rotation_reflection, DRM_MODE_ROTATE_180);
+
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_freestanding_options(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "margin_right=14,margin_left=24,margin_bottom=36,margin_top=42";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.right, 14);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.left, 24);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.bottom, 36);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.top, 42);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+static void drm_test_cmdline_freestanding_force_e_and_options(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "e,margin_right=14,margin_left=24,margin_bottom=36,margin_top=42";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.right, 14);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.left, 24);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.bottom, 36);
+ KUNIT_EXPECT_EQ(test, mode.tv_margins.top, 42);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_ON);
+}
+
+static void drm_test_cmdline_panel_orientation(struct kunit *test)
+{
+ struct drm_cmdline_mode mode = { };
+ const char *cmdline = "panel_orientation=upside_down";
+
+ KUNIT_ASSERT_TRUE(test, drm_mode_parse_command_line_for_connector(cmdline,
+ &no_connector, &mode));
+ KUNIT_EXPECT_FALSE(test, mode.specified);
+ KUNIT_EXPECT_FALSE(test, mode.refresh_specified);
+ KUNIT_EXPECT_FALSE(test, mode.bpp_specified);
+
+ KUNIT_EXPECT_EQ(test, mode.panel_orientation, DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP);
+
+ KUNIT_EXPECT_FALSE(test, mode.rb);
+ KUNIT_EXPECT_FALSE(test, mode.cvt);
+ KUNIT_EXPECT_FALSE(test, mode.interlace);
+ KUNIT_EXPECT_FALSE(test, mode.margins);
+ KUNIT_EXPECT_EQ(test, mode.force, DRM_FORCE_UNSPECIFIED);
+}
+
+struct drm_cmdline_invalid_test {
+ const char *name;
+ const char *cmdline;
+};
+
+static void drm_test_cmdline_invalid(struct kunit *test)
+{
+ const struct drm_cmdline_invalid_test *params = test->param_value;
+ struct drm_cmdline_mode mode = { };
+
+ KUNIT_EXPECT_FALSE(test, drm_mode_parse_command_line_for_connector(params->cmdline,
+ &no_connector,
+ &mode));
+}
+
+static const struct drm_cmdline_invalid_test drm_cmdline_invalid_tests[] = {
+ {
+ .name = "margin_only",
+ .cmdline = "m",
+ },
+ {
+ .name = "interlace_only",
+ .cmdline = "i",
+ },
+ {
+ .name = "res_missing_x",
+ .cmdline = "x480",
+ },
+ {
+ .name = "res_missing_y",
+ .cmdline = "1024x",
+ },
+ {
+ .name = "res_bad_y",
+ .cmdline = "1024xtest",
+ },
+ {
+ .name = "res_missing_y_bpp",
+ .cmdline = "1024x-24",
+ },
+ {
+ .name = "res_bad_bpp",
+ .cmdline = "720x480-test",
+ },
+ {
+ .name = "res_bad_refresh",
+ .cmdline = "720x480@refresh",
+ },
+ {
+ .name = "res_bpp_refresh_force_on_off",
+ .cmdline = "720x480-24@60de",
+ },
+ {
+ .name = "res_invalid_mode",
+ .cmdline = "720x480f",
+ },
+ {
+ .name = "res_bpp_wrong_place_mode",
+ .cmdline = "720x480e-24",
+ },
+ {
+ .name = "name_bpp_refresh",
+ .cmdline = "NTSC-24@60",
+ },
+ {
+ .name = "name_refresh",
+ .cmdline = "NTSC@60",
+ },
+ {
+ .name = "name_refresh_wrong_mode",
+ .cmdline = "NTSC@60m",
+ },
+ {
+ .name = "name_refresh_invalid_mode",
+ .cmdline = "NTSC@60f",
+ },
+ {
+ .name = "rotate_multiple",
+ .cmdline = "720x480,rotate=0,rotate=90",
+ },
+ {
+ .name = "rotate_invalid_val",
+ .cmdline = "720x480,rotate=42",
+ },
+ {
+ .name = "rotate_truncated",
+ .cmdline = "720x480,rotate=",
+ },
+ {
+ .name = "invalid_option",
+ .cmdline = "720x480,test=42",
+ },
+};
+
+static void drm_cmdline_invalid_desc(const struct drm_cmdline_invalid_test *t,
+ char *desc)
+{
+ sprintf(desc, "%s", t->name);
+}
+
+KUNIT_ARRAY_PARAM(drm_cmdline_invalid, drm_cmdline_invalid_tests, drm_cmdline_invalid_desc);
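Note: KUNIT_ARRAY_PARAM() generates a drm_cmdline_invalid_gen_params() helper from the array and description callback above, which is what the KUNIT_CASE_PARAM() entry at the end of the test list refers to.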
+
+static struct kunit_case drm_cmdline_parser_tests[] = {
+ KUNIT_CASE(drm_test_cmdline_force_d_only),
+ KUNIT_CASE(drm_test_cmdline_force_D_only_dvi),
+ KUNIT_CASE(drm_test_cmdline_force_D_only_hdmi),
+ KUNIT_CASE(drm_test_cmdline_force_D_only_not_digital),
+ KUNIT_CASE(drm_test_cmdline_force_e_only),
+ KUNIT_CASE(drm_test_cmdline_res),
+ KUNIT_CASE(drm_test_cmdline_res_vesa),
+ KUNIT_CASE(drm_test_cmdline_res_vesa_rblank),
+ KUNIT_CASE(drm_test_cmdline_res_rblank),
+ KUNIT_CASE(drm_test_cmdline_res_bpp),
+ KUNIT_CASE(drm_test_cmdline_res_refresh),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_interlaced),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_margins),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_off),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_on),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_on_analog),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_force_on_digital),
+ KUNIT_CASE(drm_test_cmdline_res_bpp_refresh_interlaced_margins_force_on),
+ KUNIT_CASE(drm_test_cmdline_res_margins_force_on),
+ KUNIT_CASE(drm_test_cmdline_res_vesa_margins),
+ KUNIT_CASE(drm_test_cmdline_name),
+ KUNIT_CASE(drm_test_cmdline_name_bpp),
+ KUNIT_CASE(drm_test_cmdline_name_option),
+ KUNIT_CASE(drm_test_cmdline_name_bpp_option),
+ KUNIT_CASE(drm_test_cmdline_rotate_0),
+ KUNIT_CASE(drm_test_cmdline_rotate_90),
+ KUNIT_CASE(drm_test_cmdline_rotate_180),
+ KUNIT_CASE(drm_test_cmdline_rotate_270),
+ KUNIT_CASE(drm_test_cmdline_hmirror),
+ KUNIT_CASE(drm_test_cmdline_vmirror),
+ KUNIT_CASE(drm_test_cmdline_margin_options),
+ KUNIT_CASE(drm_test_cmdline_multiple_options),
+ KUNIT_CASE(drm_test_cmdline_bpp_extra_and_option),
+ KUNIT_CASE(drm_test_cmdline_extra_and_option),
+ KUNIT_CASE(drm_test_cmdline_freestanding_options),
+ KUNIT_CASE(drm_test_cmdline_freestanding_force_e_and_options),
+ KUNIT_CASE(drm_test_cmdline_panel_orientation),
+ KUNIT_CASE_PARAM(drm_test_cmdline_invalid, drm_cmdline_invalid_gen_params),
+ {}
+};
+
+static struct kunit_suite drm_cmdline_parser_test_suite = {
+ .name = "drm_cmdline_parser",
+ .test_cases = drm_cmdline_parser_tests
+};
+
+kunit_test_suite(drm_cmdline_parser_test_suite);
+
+MODULE_AUTHOR("Maxime Ripard <maxime.ripard@bootlin.com>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_damage_helper_test.c b/drivers/gpu/drm/tests/drm_damage_helper_test.c
new file mode 100644
index 000000000..115034fc3
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_damage_helper_test.c
@@ -0,0 +1,639 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test case for drm_damage_helper functions
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <drm/drm_damage_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_plane.h>
+#include <drm/drm_drv.h>
+
+struct drm_damage_mock {
+ struct drm_driver driver;
+ struct drm_device device;
+ struct drm_object_properties obj_props;
+ struct drm_plane plane;
+ struct drm_property prop;
+ struct drm_framebuffer fb;
+ struct drm_plane_state state;
+ struct drm_plane_state old_state;
+};
+
+static int drm_damage_helper_init(struct kunit *test)
+{
+ struct drm_damage_mock *mock;
+
+ mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mock);
+
+ mock->fb.width = 2048;
+ mock->fb.height = 2048;
+
+ mock->state.crtc = ZERO_SIZE_PTR;
+ mock->state.fb = &mock->fb;
+ mock->state.visible = true;
+
+ mock->old_state.plane = &mock->plane;
+ mock->state.plane = &mock->plane;
+
+ /* just enough so that drm_plane_enable_fb_damage_clips() works */
+ mock->device.driver = &mock->driver;
+ mock->device.mode_config.prop_fb_damage_clips = &mock->prop;
+ mock->plane.dev = &mock->device;
+ mock->obj_props.count = 0;
+ mock->plane.base.properties = &mock->obj_props;
+ mock->prop.base.id = 1; /* 0 is an invalid id */
+ mock->prop.dev = &mock->device;
+
+ drm_plane_enable_fb_damage_clips(&mock->plane);
+
+ test->priv = mock;
+
+ return 0;
+}
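Note: state.crtc is set to ZERO_SIZE_PTR (a non-NULL but never-dereferenced dummy) because the damage iterator presumably only checks that a CRTC is attached before walking the clips; none of the tests below ever touch the pointer itself.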
+
+static void set_plane_src(struct drm_plane_state *state, int x1, int y1, int x2,
+ int y2)
+{
+ state->src_x = x1;
+ state->src_y = y1;
+ state->src_w = x2 - x1;
+ state->src_h = y2 - y1;
+
+ state->src.x1 = x1;
+ state->src.y1 = y1;
+ state->src.x2 = x2;
+ state->src.y2 = y2;
+}
+
+static void set_damage_clip(struct drm_mode_rect *r, int x1, int y1, int x2,
+ int y2)
+{
+ r->x1 = x1;
+ r->y1 = y1;
+ r->x2 = x2;
+ r->y2 = y2;
+}
+
+static void set_damage_blob(struct drm_property_blob *damage_blob,
+ struct drm_mode_rect *r, u32 size)
+{
+ damage_blob->length = size;
+ damage_blob->data = r;
+}
+
+static void set_plane_damage(struct drm_plane_state *state,
+ struct drm_property_blob *damage_blob)
+{
+ state->fb_damage_clips = damage_blob;
+}
+
+static void check_damage_clip(struct kunit *test, struct drm_rect *r,
+ int x1, int y1, int x2, int y2)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_plane_state state = mock->state;
+
+	/*
+	 * Round down x1/y1 and round up x2/y2: damage is not in 16.16 fixed
+	 * point, so rounding outwards catches every pixel the damaged region
+	 * touches.
+	 */
+ int src_x1 = state.src.x1 >> 16;
+ int src_y1 = state.src.y1 >> 16;
+ int src_x2 = (state.src.x2 >> 16) + !!(state.src.x2 & 0xFFFF);
+ int src_y2 = (state.src.y2 >> 16) + !!(state.src.y2 & 0xFFFF);
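+	/*
+	 * For example, src.x1 == 0x3fffe (just below 4.0 in 16.16 fixed point)
+	 * rounds down to 3, while src.x2 == 0x3fffe + (1024 << 16) rounds up
+	 * to 1028.
+	 */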
+
+ if (x1 >= x2 || y1 >= y2)
+ KUNIT_FAIL(test, "Cannot have damage clip with no dimension.");
+ if (x1 < src_x1 || y1 < src_y1 || x2 > src_x2 || y2 > src_y2)
+ KUNIT_FAIL(test, "Damage cannot be outside rounded plane src.");
+ if (r->x1 != x1 || r->y1 != y1 || r->x2 != x2 || r->y2 != y2)
+ KUNIT_FAIL(test, "Damage = %d %d %d %d, want = %d %d %d %d",
+ r->x1, r->y1, r->x2, r->y2, x1, y1, x2, y2);
+}
+
+static void drm_test_damage_iter_no_damage(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src same as fb size. */
+ set_plane_src(&mock->old_state, 0, 0, mock->fb.width << 16, mock->fb.height << 16);
+ set_plane_src(&mock->state, 0, 0, mock->fb.width << 16, mock->fb.height << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return plane src as damage.");
+ check_damage_clip(test, &clip, 0, 0, 2048, 2048);
+}
+
+static void drm_test_damage_iter_no_damage_fractional_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src has fractional part. */
+ set_plane_src(&mock->old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&mock->state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
+ "Should return rounded off plane src as damage.");
+ check_damage_clip(test, &clip, 3, 3, 1028, 772);
+}
+
+static void drm_test_damage_iter_no_damage_src_moved(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src moved since old plane state. */
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 10 << 16, 10 << 16,
+ (10 + 1024) << 16, (10 + 768) << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return plane src as damage.");
+ check_damage_clip(test, &clip, 10, 10, 1034, 778);
+}
+
+static void drm_test_damage_iter_no_damage_fractional_src_moved(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src has fractional part and it moved since old plane state. */
+ set_plane_src(&mock->old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return plane src as damage.");
+ check_damage_clip(test, &clip, 4, 4, 1029, 773);
+}
+
+static void drm_test_damage_iter_no_damage_not_visible(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ mock->state.visible = false;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
+}
+
+static void drm_test_damage_iter_no_damage_no_crtc(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ mock->state.crtc = NULL;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
+}
+
+static void drm_test_damage_iter_no_damage_no_fb(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ mock->state.fb = NULL;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
+}
+
+static void drm_test_damage_iter_simple_damage(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+	/* Damage set to plane src. */
+ set_damage_clip(&damage, 0, 0, 1024, 768);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage when set.");
+ check_damage_clip(test, &clip, 0, 0, 1024, 768);
+}
+
+static void drm_test_damage_iter_single_damage(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ set_damage_clip(&damage, 256, 192, 768, 576);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage when set.");
+ check_damage_clip(test, &clip, 256, 192, 768, 576);
+}
+
+static void drm_test_damage_iter_single_damage_intersect_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+	/* Damage intersects with plane src. */
+ set_damage_clip(&damage, 256, 192, 1360, 768);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage clipped to src.");
+ check_damage_clip(test, &clip, 256, 192, 1024, 768);
+}
+
+static void drm_test_damage_iter_single_damage_outside_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+	/* Damage clip outside plane src. */
+ set_damage_clip(&damage, 1360, 1360, 1380, 1380);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
+}
+
+static void drm_test_damage_iter_single_damage_fractional_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src has fractional part. */
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_damage_clip(&damage, 10, 10, 256, 330);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage when set.");
+ check_damage_clip(test, &clip, 10, 10, 256, 330);
+}
+
+static void drm_test_damage_iter_single_damage_intersect_fractional_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src has fractional part. */
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+	/* Damage intersects with plane src. */
+ set_damage_clip(&damage, 10, 1, 1360, 330);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
+ "Should return damage clipped to rounded off src.");
+ check_damage_clip(test, &clip, 10, 4, 1029, 330);
+}
+
+static void drm_test_damage_iter_single_damage_outside_fractional_src(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src has fractional part. */
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+	/* Damage clip outside plane src. */
+ set_damage_clip(&damage, 1360, 1360, 1380, 1380);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should have no damage.");
+}
+
+static void drm_test_damage_iter_single_damage_src_moved(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src moved since old plane state. */
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 10 << 16, 10 << 16,
+ (10 + 1024) << 16, (10 + 768) << 16);
+ set_damage_clip(&damage, 20, 30, 256, 256);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
+ "Should return plane src as damage.");
+ check_damage_clip(test, &clip, 10, 10, 1034, 778);
+}
+
+static void drm_test_damage_iter_single_damage_fractional_src_moved(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage;
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ /* Plane src with fractional part moved since old plane state. */
+ set_plane_src(&mock->old_state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+	/* Damage intersects with plane src. */
+ set_damage_clip(&damage, 20, 30, 1360, 256);
+ set_damage_blob(&damage_blob, &damage, sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
+			    "Should return rounded off plane src as damage.");
+ check_damage_clip(test, &clip, 4, 4, 1029, 773);
+}
+
+static void drm_test_damage_iter_damage(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ /* 2 damage clips. */
+ set_damage_clip(&damage[0], 20, 30, 200, 180);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ if (num_hits == 0)
+ check_damage_clip(test, &clip, 20, 30, 200, 180);
+ if (num_hits == 1)
+ check_damage_clip(test, &clip, 240, 200, 280, 250);
+ num_hits++;
+ }
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 2, "Should return damage when set.");
+}
+
+static void drm_test_damage_iter_damage_one_intersect(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+	/* 2 damage clips, one intersecting plane src. */
+ set_damage_clip(&damage[0], 20, 30, 200, 180);
+ set_damage_clip(&damage[1], 2, 2, 1360, 1360);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip) {
+ if (num_hits == 0)
+ check_damage_clip(test, &clip, 20, 30, 200, 180);
+ if (num_hits == 1)
+ check_damage_clip(test, &clip, 4, 4, 1029, 773);
+ num_hits++;
+ }
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 2, "Should return damage when set.");
+}
+
+static void drm_test_damage_iter_damage_one_outside(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0, 0, 1024 << 16, 768 << 16);
+ set_plane_src(&mock->state, 0, 0, 1024 << 16, 768 << 16);
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1, "Should return damage when set.");
+ check_damage_clip(test, &clip, 240, 200, 280, 250);
+}
+
+static void drm_test_damage_iter_damage_src_moved(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 1,
+			    "Should return rounded off plane src as damage.");
+ check_damage_clip(test, &clip, 3, 3, 1028, 772);
+}
+
+static void drm_test_damage_iter_damage_not_visible(struct kunit *test)
+{
+ struct drm_damage_mock *mock = test->priv;
+ struct drm_atomic_helper_damage_iter iter;
+ struct drm_property_blob damage_blob;
+ struct drm_mode_rect damage[2];
+ struct drm_rect clip;
+ u32 num_hits = 0;
+
+ mock->state.visible = false;
+
+ set_plane_src(&mock->old_state, 0x40002, 0x40002,
+ 0x40002 + (1024 << 16), 0x40002 + (768 << 16));
+ set_plane_src(&mock->state, 0x3fffe, 0x3fffe,
+ 0x3fffe + (1024 << 16), 0x3fffe + (768 << 16));
+ /* 2 damage clips, one outside plane src. */
+ set_damage_clip(&damage[0], 1360, 1360, 1380, 1380);
+ set_damage_clip(&damage[1], 240, 200, 280, 250);
+ set_damage_blob(&damage_blob, &damage[0], sizeof(damage));
+ set_plane_damage(&mock->state, &damage_blob);
+ drm_atomic_helper_damage_iter_init(&iter, &mock->old_state, &mock->state);
+ drm_atomic_for_each_plane_damage(&iter, &clip)
+ num_hits++;
+
+ KUNIT_EXPECT_EQ_MSG(test, num_hits, 0, "Should not return any damage.");
+}
+
+static struct kunit_case drm_damage_helper_tests[] = {
+ KUNIT_CASE(drm_test_damage_iter_no_damage),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_fractional_src),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_src_moved),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_fractional_src_moved),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_not_visible),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_no_crtc),
+ KUNIT_CASE(drm_test_damage_iter_no_damage_no_fb),
+ KUNIT_CASE(drm_test_damage_iter_simple_damage),
+ KUNIT_CASE(drm_test_damage_iter_single_damage),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_intersect_src),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_outside_src),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_fractional_src),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_intersect_fractional_src),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_outside_fractional_src),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_src_moved),
+ KUNIT_CASE(drm_test_damage_iter_single_damage_fractional_src_moved),
+ KUNIT_CASE(drm_test_damage_iter_damage),
+ KUNIT_CASE(drm_test_damage_iter_damage_one_intersect),
+ KUNIT_CASE(drm_test_damage_iter_damage_one_outside),
+ KUNIT_CASE(drm_test_damage_iter_damage_src_moved),
+ KUNIT_CASE(drm_test_damage_iter_damage_not_visible),
+ { }
+};
+
+static struct kunit_suite drm_damage_helper_test_suite = {
+ .name = "drm_damage_helper",
+ .init = drm_damage_helper_init,
+ .test_cases = drm_damage_helper_tests,
+};
+
+kunit_test_suite(drm_damage_helper_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
new file mode 100644
index 000000000..65c9d225b
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_dp_mst_helper_test.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for the DRM DP MST helpers
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#define PREFIX_STR "[drm_dp_mst_helper]"
+
+#include <kunit/test.h>
+
+#include <linux/random.h>
+
+#include <drm/display/drm_dp_mst_helper.h>
+#include <drm/drm_print.h>
+
+#include "../display/drm_dp_mst_topology_internal.h"
+
+static void drm_test_dp_mst_calc_pbn_mode(struct kunit *test)
+{
+ int pbn, i;
+ const struct {
+ int rate;
+ int bpp;
+ int expected;
+ bool dsc;
+ } test_params[] = {
+ { 154000, 30, 689, false },
+ { 234000, 30, 1047, false },
+ { 297000, 24, 1063, false },
+ { 332880, 24, 50, true },
+ { 324540, 24, 49, true },
+ };
+
+ for (i = 0; i < ARRAY_SIZE(test_params); i++) {
+ pbn = drm_dp_calc_pbn_mode(test_params[i].rate,
+ test_params[i].bpp,
+ test_params[i].dsc);
+ KUNIT_EXPECT_EQ_MSG(test, pbn, test_params[i].expected,
+ "Expected PBN %d for clock %d bpp %d, got %d\n",
+ test_params[i].expected, test_params[i].rate,
+ test_params[i].bpp, pbn);
+ }
+}
+
+static bool
+sideband_msg_req_equal(const struct drm_dp_sideband_msg_req_body *in,
+ const struct drm_dp_sideband_msg_req_body *out)
+{
+ const struct drm_dp_remote_i2c_read_tx *txin, *txout;
+ int i;
+
+ if (in->req_type != out->req_type)
+ return false;
+
+ switch (in->req_type) {
+	/*
+	 * Compare struct members manually for request types that can't be
+	 * compared with a simple memcmp(), because they contain pointers to
+	 * other allocated structs.
+	 */
+ case DP_REMOTE_I2C_READ:
+#define IN in->u.i2c_read
+#define OUT out->u.i2c_read
+ if (IN.num_bytes_read != OUT.num_bytes_read ||
+ IN.num_transactions != OUT.num_transactions ||
+ IN.port_number != OUT.port_number ||
+ IN.read_i2c_device_id != OUT.read_i2c_device_id)
+ return false;
+
+ for (i = 0; i < IN.num_transactions; i++) {
+ txin = &IN.transactions[i];
+ txout = &OUT.transactions[i];
+
+ if (txin->i2c_dev_id != txout->i2c_dev_id ||
+ txin->no_stop_bit != txout->no_stop_bit ||
+ txin->num_bytes != txout->num_bytes ||
+ txin->i2c_transaction_delay !=
+ txout->i2c_transaction_delay)
+ return false;
+
+ if (memcmp(txin->bytes, txout->bytes,
+ txin->num_bytes) != 0)
+ return false;
+ }
+ break;
+#undef IN
+#undef OUT
+
+ case DP_REMOTE_DPCD_WRITE:
+#define IN in->u.dpcd_write
+#define OUT out->u.dpcd_write
+ if (IN.dpcd_address != OUT.dpcd_address ||
+ IN.num_bytes != OUT.num_bytes ||
+ IN.port_number != OUT.port_number)
+ return false;
+
+ return memcmp(IN.bytes, OUT.bytes, IN.num_bytes) == 0;
+#undef IN
+#undef OUT
+
+ case DP_REMOTE_I2C_WRITE:
+#define IN in->u.i2c_write
+#define OUT out->u.i2c_write
+ if (IN.port_number != OUT.port_number ||
+ IN.write_i2c_device_id != OUT.write_i2c_device_id ||
+ IN.num_bytes != OUT.num_bytes)
+ return false;
+
+ return memcmp(IN.bytes, OUT.bytes, IN.num_bytes) == 0;
+#undef IN
+#undef OUT
+
+ default:
+ return memcmp(in, out, sizeof(*in)) == 0;
+ }
+
+ return true;
+}
+
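+/*
+ * Round-trip helper: encode @in into a sideband tx message, decode it back
+ * into a freshly allocated body and check that the two match.
+ */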
+static bool
+sideband_msg_req_encode_decode(struct drm_dp_sideband_msg_req_body *in)
+{
+ struct drm_dp_sideband_msg_req_body *out;
+ struct drm_printer p = drm_err_printer(PREFIX_STR);
+ struct drm_dp_sideband_msg_tx *txmsg;
+ int i, ret;
+ bool result = true;
+
+ out = kzalloc(sizeof(*out), GFP_KERNEL);
+ if (!out)
+ return false;
+
+ txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
+ if (!txmsg) {
+ kfree(out);
+ return false;
+ }
+
+ drm_dp_encode_sideband_req(in, txmsg);
+ ret = drm_dp_decode_sideband_req(txmsg, out);
+ if (ret < 0) {
+ drm_printf(&p, "Failed to decode sideband request: %d\n",
+ ret);
+ result = false;
+ goto out;
+ }
+
+ if (!sideband_msg_req_equal(in, out)) {
+ drm_printf(&p, "Encode/decode failed, expected:\n");
+ drm_dp_dump_sideband_msg_req_body(in, 1, &p);
+ drm_printf(&p, "Got:\n");
+ drm_dp_dump_sideband_msg_req_body(out, 1, &p);
+ result = false;
+ goto out;
+ }
+
+ switch (in->req_type) {
+ case DP_REMOTE_DPCD_WRITE:
+ kfree(out->u.dpcd_write.bytes);
+ break;
+ case DP_REMOTE_I2C_READ:
+ for (i = 0; i < out->u.i2c_read.num_transactions; i++)
+ kfree(out->u.i2c_read.transactions[i].bytes);
+ break;
+ case DP_REMOTE_I2C_WRITE:
+ kfree(out->u.i2c_write.bytes);
+ break;
+ }
+
+ /* Clear everything but the req_type for the input */
+ memset(&in->u, 0, sizeof(in->u));
+
+out:
+ kfree(out);
+ kfree(txmsg);
+ return result;
+}
+
+static void drm_test_dp_mst_sideband_msg_req_decode(struct kunit *test)
+{
+ struct drm_dp_sideband_msg_req_body in = { 0 };
+ u8 data[] = { 0xff, 0x0, 0xdd };
+ int i;
+
+ in.req_type = DP_ENUM_PATH_RESOURCES;
+ in.u.port_num.port_number = 5;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+
+ in.req_type = DP_POWER_UP_PHY;
+ in.u.port_num.port_number = 5;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+
+ in.req_type = DP_POWER_DOWN_PHY;
+ in.u.port_num.port_number = 5;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+
+ in.req_type = DP_ALLOCATE_PAYLOAD;
+ in.u.allocate_payload.number_sdp_streams = 3;
+ for (i = 0; i < in.u.allocate_payload.number_sdp_streams; i++)
+ in.u.allocate_payload.sdp_stream_sink[i] = i + 1;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ in.u.allocate_payload.port_number = 0xf;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ in.u.allocate_payload.vcpi = 0x7f;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ in.u.allocate_payload.pbn = U16_MAX;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+
+ in.req_type = DP_QUERY_PAYLOAD;
+ in.u.query_payload.port_number = 0xf;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ in.u.query_payload.vcpi = 0x7f;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+
+ in.req_type = DP_REMOTE_DPCD_READ;
+ in.u.dpcd_read.port_number = 0xf;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ in.u.dpcd_read.dpcd_address = 0xfedcb;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ in.u.dpcd_read.num_bytes = U8_MAX;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+
+ in.req_type = DP_REMOTE_DPCD_WRITE;
+ in.u.dpcd_write.port_number = 0xf;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ in.u.dpcd_write.dpcd_address = 0xfedcb;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ in.u.dpcd_write.num_bytes = ARRAY_SIZE(data);
+ in.u.dpcd_write.bytes = data;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+
+ in.req_type = DP_REMOTE_I2C_READ;
+ in.u.i2c_read.port_number = 0xf;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ in.u.i2c_read.read_i2c_device_id = 0x7f;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ in.u.i2c_read.num_transactions = 3;
+ in.u.i2c_read.num_bytes_read = ARRAY_SIZE(data) * 3;
+ for (i = 0; i < in.u.i2c_read.num_transactions; i++) {
+ in.u.i2c_read.transactions[i].bytes = data;
+ in.u.i2c_read.transactions[i].num_bytes = ARRAY_SIZE(data);
+ in.u.i2c_read.transactions[i].i2c_dev_id = 0x7f & ~i;
+ in.u.i2c_read.transactions[i].i2c_transaction_delay = 0xf & ~i;
+ }
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+
+ in.req_type = DP_REMOTE_I2C_WRITE;
+ in.u.i2c_write.port_number = 0xf;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ in.u.i2c_write.write_i2c_device_id = 0x7f;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ in.u.i2c_write.num_bytes = ARRAY_SIZE(data);
+ in.u.i2c_write.bytes = data;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+
+ in.req_type = DP_QUERY_STREAM_ENC_STATUS;
+ in.u.enc_status.stream_id = 1;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ get_random_bytes(in.u.enc_status.client_id,
+ sizeof(in.u.enc_status.client_id));
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ in.u.enc_status.stream_event = 3;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ in.u.enc_status.valid_stream_event = 0;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ in.u.enc_status.stream_behavior = 3;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+ in.u.enc_status.valid_stream_behavior = 1;
+ KUNIT_EXPECT_TRUE(test, sideband_msg_req_encode_decode(&in));
+}
+
+static struct kunit_case drm_dp_mst_helper_tests[] = {
+ KUNIT_CASE(drm_test_dp_mst_calc_pbn_mode),
+ KUNIT_CASE(drm_test_dp_mst_sideband_msg_req_decode),
+ { }
+};
+
+static struct kunit_suite drm_dp_mst_helper_test_suite = {
+ .name = "drm_dp_mst_helper",
+ .test_cases = drm_dp_mst_helper_tests,
+};
+
+kunit_test_suite(drm_dp_mst_helper_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_format_helper_test.c b/drivers/gpu/drm/tests/drm_format_helper_test.c
new file mode 100644
index 000000000..2191e57f2
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_format_helper_test.c
@@ -0,0 +1,463 @@
+// SPDX-License-Identifier: GPL-2.0+
+
+#include <kunit/test.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_file.h>
+#include <drm/drm_format_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_print.h>
+#include <drm/drm_rect.h>
+
+#include "../drm_crtc_internal.h"
+
+#define TEST_BUF_SIZE 50
+
+struct convert_to_gray8_result {
+ unsigned int dst_pitch;
+ const u8 expected[TEST_BUF_SIZE];
+};
+
+struct convert_to_rgb332_result {
+ unsigned int dst_pitch;
+ const u8 expected[TEST_BUF_SIZE];
+};
+
+struct convert_to_rgb565_result {
+ unsigned int dst_pitch;
+ const u16 expected[TEST_BUF_SIZE];
+ const u16 expected_swab[TEST_BUF_SIZE];
+};
+
+struct convert_to_rgb888_result {
+ unsigned int dst_pitch;
+ const u8 expected[TEST_BUF_SIZE];
+};
+
+struct convert_to_xrgb2101010_result {
+ unsigned int dst_pitch;
+ const u32 expected[TEST_BUF_SIZE];
+};
+
+struct convert_xrgb8888_case {
+ const char *name;
+ unsigned int pitch;
+ struct drm_rect clip;
+ const u32 xrgb8888[TEST_BUF_SIZE];
+ struct convert_to_gray8_result gray8_result;
+ struct convert_to_rgb332_result rgb332_result;
+ struct convert_to_rgb565_result rgb565_result;
+ struct convert_to_rgb888_result rgb888_result;
+ struct convert_to_xrgb2101010_result xrgb2101010_result;
+};
+
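+/*
+ * The expected buffers below hold the destination-format bytes for the
+ * clipped region. For gray8 the values follow the ITU BT.601 luma weighting
+ * used by drm_fb_xrgb8888_to_gray8() (roughly 0.30 R + 0.59 G + 0.11 B),
+ * e.g. pure red 0xFF0000 becomes 0x4C.
+ */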
+static struct convert_xrgb8888_case convert_xrgb8888_cases[] = {
+ {
+ .name = "single_pixel_source_buffer",
+ .pitch = 1 * 4,
+ .clip = DRM_RECT_INIT(0, 0, 1, 1),
+ .xrgb8888 = { 0x01FF0000 },
+ .gray8_result = {
+ .dst_pitch = 0,
+ .expected = { 0x4C },
+ },
+ .rgb332_result = {
+ .dst_pitch = 0,
+ .expected = { 0xE0 },
+ },
+ .rgb565_result = {
+ .dst_pitch = 0,
+ .expected = { 0xF800 },
+ .expected_swab = { 0x00F8 },
+ },
+ .rgb888_result = {
+ .dst_pitch = 0,
+ .expected = { 0x00, 0x00, 0xFF },
+ },
+ .xrgb2101010_result = {
+ .dst_pitch = 0,
+ .expected = { 0x3FF00000 },
+ },
+ },
+ {
+ .name = "single_pixel_clip_rectangle",
+ .pitch = 2 * 4,
+ .clip = DRM_RECT_INIT(1, 1, 1, 1),
+ .xrgb8888 = {
+ 0x00000000, 0x00000000,
+ 0x00000000, 0x10FF0000,
+ },
+ .gray8_result = {
+ .dst_pitch = 0,
+ .expected = { 0x4C },
+ },
+ .rgb332_result = {
+ .dst_pitch = 0,
+ .expected = { 0xE0 },
+ },
+ .rgb565_result = {
+ .dst_pitch = 0,
+ .expected = { 0xF800 },
+ .expected_swab = { 0x00F8 },
+ },
+ .rgb888_result = {
+ .dst_pitch = 0,
+ .expected = { 0x00, 0x00, 0xFF },
+ },
+ .xrgb2101010_result = {
+ .dst_pitch = 0,
+ .expected = { 0x3FF00000 },
+ },
+ },
+ {
+		/* Well-known colors: white, black, red, green, blue, magenta,
+		 * yellow and cyan. Different values for the X in XRGB8888 to
+		 * make sure it is ignored. Partial clip area.
+ */
+ .name = "well_known_colors",
+ .pitch = 4 * 4,
+ .clip = DRM_RECT_INIT(1, 1, 2, 4),
+ .xrgb8888 = {
+ 0x00000000, 0x00000000, 0x00000000, 0x00000000,
+ 0x00000000, 0x11FFFFFF, 0x22000000, 0x00000000,
+ 0x00000000, 0x33FF0000, 0x4400FF00, 0x00000000,
+ 0x00000000, 0x550000FF, 0x66FF00FF, 0x00000000,
+ 0x00000000, 0x77FFFF00, 0x8800FFFF, 0x00000000,
+ },
+ .gray8_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFF, 0x00,
+ 0x4C, 0x99,
+ 0x19, 0x66,
+ 0xE5, 0xB2,
+ },
+ },
+ .rgb332_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFF, 0x00,
+ 0xE0, 0x1C,
+ 0x03, 0xE3,
+ 0xFC, 0x1F,
+ },
+ },
+ .rgb565_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFFFF, 0x0000,
+ 0xF800, 0x07E0,
+ 0x001F, 0xF81F,
+ 0xFFE0, 0x07FF,
+ },
+ .expected_swab = {
+ 0xFFFF, 0x0000,
+ 0x00F8, 0xE007,
+ 0x1F00, 0x1FF8,
+ 0xE0FF, 0xFF07,
+ },
+ },
+ .rgb888_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0xFF, 0xFF, 0xFF, 0x00, 0x00, 0x00,
+ 0x00, 0x00, 0xFF, 0x00, 0xFF, 0x00,
+ 0xFF, 0x00, 0x00, 0xFF, 0x00, 0xFF,
+ 0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0x00,
+ },
+ },
+ .xrgb2101010_result = {
+ .dst_pitch = 0,
+ .expected = {
+ 0x3FFFFFFF, 0x00000000,
+ 0x3FF00000, 0x000FFC00,
+ 0x000003FF, 0x3FF003FF,
+ 0x3FFFFC00, 0x000FFFFF,
+ },
+ },
+ },
+ {
+ /* Randomly picked colors. Full buffer within the clip area. */
+ .name = "destination_pitch",
+ .pitch = 3 * 4,
+ .clip = DRM_RECT_INIT(0, 0, 3, 3),
+ .xrgb8888 = {
+ 0xA10E449C, 0xB1114D05, 0xC1A80303,
+ 0xD16C7073, 0xA20E449C, 0xB2114D05,
+ 0xC2A80303, 0xD26C7073, 0xA30E449C,
+ },
+ .gray8_result = {
+ .dst_pitch = 5,
+ .expected = {
+ 0x3C, 0x33, 0x34, 0x00, 0x00,
+ 0x6F, 0x3C, 0x33, 0x00, 0x00,
+ 0x34, 0x6F, 0x3C, 0x00, 0x00,
+ },
+ },
+ .rgb332_result = {
+ .dst_pitch = 5,
+ .expected = {
+ 0x0A, 0x08, 0xA0, 0x00, 0x00,
+ 0x6D, 0x0A, 0x08, 0x00, 0x00,
+ 0xA0, 0x6D, 0x0A, 0x00, 0x00,
+ },
+ },
+ .rgb565_result = {
+ .dst_pitch = 10,
+ .expected = {
+ 0x0A33, 0x1260, 0xA800, 0x0000, 0x0000,
+ 0x6B8E, 0x0A33, 0x1260, 0x0000, 0x0000,
+ 0xA800, 0x6B8E, 0x0A33, 0x0000, 0x0000,
+ },
+ .expected_swab = {
+ 0x330A, 0x6012, 0x00A8, 0x0000, 0x0000,
+ 0x8E6B, 0x330A, 0x6012, 0x0000, 0x0000,
+ 0x00A8, 0x8E6B, 0x330A, 0x0000, 0x0000,
+ },
+ },
+ .rgb888_result = {
+ .dst_pitch = 15,
+ .expected = {
+ 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11, 0x03, 0x03, 0xA8,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x73, 0x70, 0x6C, 0x9C, 0x44, 0x0E, 0x05, 0x4D, 0x11,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ 0x03, 0x03, 0xA8, 0x73, 0x70, 0x6C, 0x9C, 0x44, 0x0E,
+ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+ },
+ },
+ .xrgb2101010_result = {
+ .dst_pitch = 20,
+ .expected = {
+ 0x03844672, 0x0444D414, 0x2A20300C, 0x00000000, 0x00000000,
+ 0x1B1705CD, 0x03844672, 0x0444D414, 0x00000000, 0x00000000,
+ 0x2A20300C, 0x1B1705CD, 0x03844672, 0x00000000, 0x00000000,
+ },
+ },
+ },
+};
+
+/*
+ * conversion_buf_size - Return the destination buffer size required to convert
+ * between formats.
+ * @dst_format: destination buffer pixel format (DRM_FORMAT_*)
+ * @dst_pitch: Number of bytes between two consecutive scanlines within dst
+ * @clip: Clip rectangle area to convert
+ *
+ * Returns:
+ * The size of the destination buffer, or a negative value on error.
+ */
+static size_t conversion_buf_size(u32 dst_format, unsigned int dst_pitch,
+ const struct drm_rect *clip)
+{
+ const struct drm_format_info *dst_fi = drm_format_info(dst_format);
+
+ if (!dst_fi)
+ return -EINVAL;
+
+ if (!dst_pitch)
+ dst_pitch = drm_rect_width(clip) * dst_fi->cpp[0];
+
+ return dst_pitch * drm_rect_height(clip);
+}
+
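+/*
+ * The test source buffers are laid out as little-endian 32-bit words; convert
+ * them to CPU byte order so the tests behave the same on big-endian machines.
+ */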
+static u32 *le32buf_to_cpu(struct kunit *test, const u32 *buf, size_t buf_size)
+{
+ u32 *dst = NULL;
+ int n;
+
+ dst = kunit_kzalloc(test, sizeof(*dst) * buf_size, GFP_KERNEL);
+ if (!dst)
+ return NULL;
+
+ for (n = 0; n < buf_size; n++)
+ dst[n] = le32_to_cpu((__force __le32)buf[n]);
+
+ return dst;
+}
+
+static void convert_xrgb8888_case_desc(struct convert_xrgb8888_case *t,
+ char *desc)
+{
+ strscpy(desc, t->name, KUNIT_PARAM_DESC_SIZE);
+}
+
+KUNIT_ARRAY_PARAM(convert_xrgb8888, convert_xrgb8888_cases,
+ convert_xrgb8888_case_desc);
+
+static void drm_test_fb_xrgb8888_to_gray8(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_gray8_result *result = &params->gray8_result;
+ size_t dst_size;
+ __u8 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_R8, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_gray8(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_rgb332(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_rgb332_result *result = &params->rgb332_result;
+ size_t dst_size;
+ __u8 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_RGB332, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_rgb332(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_rgb565(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_rgb565_result *result = &params->rgb565_result;
+ size_t dst_size;
+ __u16 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_RGB565, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip, false);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+
+ drm_fb_xrgb8888_to_rgb565(&dst, &result->dst_pitch, &src, &fb, &params->clip, true);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected_swab, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_rgb888(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_rgb888_result *result = &params->rgb888_result;
+ size_t dst_size;
+ __u8 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_RGB888, result->dst_pitch,
+ &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_rgb888(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
+static void drm_test_fb_xrgb8888_to_xrgb2101010(struct kunit *test)
+{
+ const struct convert_xrgb8888_case *params = test->param_value;
+ const struct convert_to_xrgb2101010_result *result = &params->xrgb2101010_result;
+ size_t dst_size;
+ __u32 *buf = NULL;
+ __u32 *xrgb8888 = NULL;
+ struct iosys_map dst, src;
+
+ struct drm_framebuffer fb = {
+ .format = drm_format_info(DRM_FORMAT_XRGB8888),
+ .pitches = { params->pitch, 0, 0 },
+ };
+
+ dst_size = conversion_buf_size(DRM_FORMAT_XRGB2101010,
+ result->dst_pitch, &params->clip);
+ KUNIT_ASSERT_GT(test, dst_size, 0);
+
+ buf = kunit_kzalloc(test, dst_size, GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);
+ iosys_map_set_vaddr(&dst, buf);
+
+ xrgb8888 = le32buf_to_cpu(test, params->xrgb8888, TEST_BUF_SIZE);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, xrgb8888);
+ iosys_map_set_vaddr(&src, xrgb8888);
+
+ drm_fb_xrgb8888_to_xrgb2101010(&dst, &result->dst_pitch, &src, &fb, &params->clip);
+ buf = le32buf_to_cpu(test, buf, dst_size / sizeof(u32));
+ KUNIT_EXPECT_EQ(test, memcmp(buf, result->expected, dst_size), 0);
+}
+
+static struct kunit_case drm_format_helper_test_cases[] = {
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_gray8, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb332, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb565, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_rgb888, convert_xrgb8888_gen_params),
+ KUNIT_CASE_PARAM(drm_test_fb_xrgb8888_to_xrgb2101010, convert_xrgb8888_gen_params),
+ {}
+};
+
+static struct kunit_suite drm_format_helper_test_suite = {
+ .name = "drm_format_helper_test",
+ .test_cases = drm_format_helper_test_cases,
+};
+
+kunit_test_suite(drm_format_helper_test_suite);
+
+MODULE_DESCRIPTION("KUnit tests for the drm_format_helper APIs");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("José Expósito <jose.exposito89@gmail.com>");
diff --git a/drivers/gpu/drm/tests/drm_format_test.c b/drivers/gpu/drm/tests/drm_format_test.c
new file mode 100644
index 000000000..ec6996ce8
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_format_test.c
@@ -0,0 +1,359 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_format functions
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <drm/drm_fourcc.h>
+
+static void drm_test_format_block_width_invalid(struct kunit *test)
+{
+ const struct drm_format_info *info = NULL;
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 0);
+}
+
+static void drm_test_format_block_width_one_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB4444);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
+}
+
+static void drm_test_format_block_width_two_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 2), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
+}
+
+static void drm_test_format_block_width_three_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_YUV422);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 2), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 3), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
+}
+
+static void drm_test_format_block_width_tiled(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L0);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 0), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, 1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_width(info, -1), 0);
+}
+
+static void drm_test_format_block_height_invalid(struct kunit *test)
+{
+ const struct drm_format_info *info = NULL;
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 0);
+}
+
+static void drm_test_format_block_height_one_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB4444);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 0);
+}
+
+static void drm_test_format_block_height_two_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 2), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
+}
+
+static void drm_test_format_block_height_three_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_YUV422);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 2), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 3), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
+}
+
+static void drm_test_format_block_height_tiled(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L0);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 0), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, 1), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_block_height(info, -1), 0);
+}
+
+static void drm_test_format_min_pitch_invalid(struct kunit *test)
+{
+ const struct drm_format_info *info = NULL;
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+}
+
+static void drm_test_format_min_pitch_one_plane_8bpp(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_RGB332);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 640);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 1024);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 1920);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 4096);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 671);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)),
+ (uint64_t)(UINT_MAX - 1));
+}
+
+static void drm_test_format_min_pitch_one_plane_16bpp(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_XRGB4444);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 4);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 1280);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 2048);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 3840);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 8192);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 1342);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX * 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)),
+ (uint64_t)(UINT_MAX - 1) * 2);
+}
+
+static void drm_test_format_min_pitch_one_plane_24bpp(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_RGB888);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 3);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 6);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 1920);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 3072);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 5760);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 12288);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 2013);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX * 3);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX - 1),
+ (uint64_t)(UINT_MAX - 1) * 3);
+}
+
+static void drm_test_format_min_pitch_one_plane_32bpp(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_ABGR8888);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 4);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 8);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 2560);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 4096);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 7680);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 16384);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 2684);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX * 4);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX - 1),
+ (uint64_t)(UINT_MAX - 1) * 4);
+}
+
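+/*
+ * NV12 has a full-resolution 8-bit Y plane (plane 0) and a half-horizontal-
+ * resolution interleaved CbCr plane (plane 1) with 2 bytes per sample, so a
+ * pitch of w bytes is expected for w luma pixels and for w/2 chroma samples.
+ */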
+static void drm_test_format_min_pitch_two_plane(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_NV12);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 1), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 1), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 640);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 320), 640);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 1024);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 512), 1024);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 1920);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 960), 1920);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 4096);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 2048), 4096);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 671);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 336), 672);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1),
+ (uint64_t)UINT_MAX + 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1)),
+ (uint64_t)(UINT_MAX - 1));
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2),
+ (uint64_t)(UINT_MAX - 1));
+}
+
+static void drm_test_format_min_pitch_three_plane_8bpp(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_YUV422);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 3, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 1), 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 2), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 2), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 640);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 320), 320);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 320), 320);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 1024);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 512), 512);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 512), 512);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 1920);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 960), 960);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 960), 960);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 4096);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 2048), 2048);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 2048), 2048);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 671);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 336), 336);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, 336), 336);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, UINT_MAX / 2 + 1),
+ (uint64_t)UINT_MAX / 2 + 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, UINT_MAX / 2 + 1),
+ (uint64_t)UINT_MAX / 2 + 1);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, (UINT_MAX - 1) / 2),
+ (uint64_t)(UINT_MAX - 1) / 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, (UINT_MAX - 1) / 2),
+ (uint64_t)(UINT_MAX - 1) / 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 2, (UINT_MAX - 1) / 2),
+ (uint64_t)(UINT_MAX - 1) / 2);
+}
+
+static void drm_test_format_min_pitch_tiled(struct kunit *test)
+{
+ const struct drm_format_info *info = drm_format_info(DRM_FORMAT_X0L2);
+
+ KUNIT_ASSERT_NOT_NULL(test, info);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, -1, 0), 0);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 1, 0), 0);
+
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1), 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 2), 4);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 640), 1280);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1024), 2048);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 1920), 3840);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 4096), 8192);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, 671), 1342);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX),
+ (uint64_t)UINT_MAX * 2);
+ KUNIT_EXPECT_EQ(test, drm_format_info_min_pitch(info, 0, UINT_MAX - 1),
+ (uint64_t)(UINT_MAX - 1) * 2);
+}
+
+static struct kunit_case drm_format_tests[] = {
+ KUNIT_CASE(drm_test_format_block_width_invalid),
+ KUNIT_CASE(drm_test_format_block_width_one_plane),
+ KUNIT_CASE(drm_test_format_block_width_two_plane),
+ KUNIT_CASE(drm_test_format_block_width_three_plane),
+ KUNIT_CASE(drm_test_format_block_width_tiled),
+ KUNIT_CASE(drm_test_format_block_height_invalid),
+ KUNIT_CASE(drm_test_format_block_height_one_plane),
+ KUNIT_CASE(drm_test_format_block_height_two_plane),
+ KUNIT_CASE(drm_test_format_block_height_three_plane),
+ KUNIT_CASE(drm_test_format_block_height_tiled),
+ KUNIT_CASE(drm_test_format_min_pitch_invalid),
+ KUNIT_CASE(drm_test_format_min_pitch_one_plane_8bpp),
+ KUNIT_CASE(drm_test_format_min_pitch_one_plane_16bpp),
+ KUNIT_CASE(drm_test_format_min_pitch_one_plane_24bpp),
+ KUNIT_CASE(drm_test_format_min_pitch_one_plane_32bpp),
+ KUNIT_CASE(drm_test_format_min_pitch_two_plane),
+ KUNIT_CASE(drm_test_format_min_pitch_three_plane_8bpp),
+ KUNIT_CASE(drm_test_format_min_pitch_tiled),
+ {}
+};
+
+static struct kunit_suite drm_format_test_suite = {
+ .name = "drm_format",
+ .test_cases = drm_format_tests,
+};
+
+kunit_test_suite(drm_format_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_framebuffer_test.c b/drivers/gpu/drm/tests/drm_framebuffer_test.c
new file mode 100644
index 000000000..df235b7fd
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_framebuffer_test.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_framebuffer functions
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <drm/drm_device.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_print.h>
+
+#include "../drm_crtc_internal.h"
+
+#define MIN_WIDTH 4
+#define MAX_WIDTH 4096
+#define MIN_HEIGHT 4
+#define MAX_HEIGHT 4096
+
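+/* Each test case pairs a struct drm_mode_fb_cmd2 with the expected outcome:
+ * buffer_created is 1 when the command is expected to pass the core checks
+ * in drm_internal_framebuffer_create() and reach the driver's .fb_create hook.
+ */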
+struct drm_framebuffer_test {
+ int buffer_created;
+ struct drm_mode_fb_cmd2 cmd;
+ const char *name;
+};
+
+static const struct drm_framebuffer_test drm_framebuffer_create_cases[] = {
+{ .buffer_created = 1, .name = "ABGR8888 normal sizes",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * 600, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 max sizes",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 pitch greater than min required",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH + 1, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 pitch less than min required",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH - 1, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Invalid width",
+ .cmd = { .width = MAX_WIDTH + 1, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * (MAX_WIDTH + 1), 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Invalid buffer handle",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 0, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "No pixel format",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = 0,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Width 0",
+ .cmd = { .width = 0, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Height 0",
+ .cmd = { .width = MAX_WIDTH, .height = 0, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Out of bound height * pitch combination",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX - 1, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 Large buffer offset",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 Set DRM_MODE_FB_MODIFIERS without modifiers",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 Valid buffer modifier",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { AFBC_FORMAT_MOD_YTR, 0, 0 },
+ }
+},
+{ .buffer_created = 0,
+ .name = "ABGR8888 Invalid buffer modifier(DRM_FORMAT_MOD_SAMSUNG_64_32_TILE)",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "ABGR8888 Extra pitches without DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .offsets = { UINT_MAX / 2, 0, 0 },
+ .pitches = { 4 * MAX_WIDTH, 4 * MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "ABGR8888 Extra pitches with DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_ABGR8888,
+ .handles = { 1, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .pitches = { 4 * MAX_WIDTH, 4 * MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "NV12 Normal sizes",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .pitches = { 600, 600, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "NV12 Max sizes",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 Invalid pitch",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .pitches = { MAX_WIDTH, MAX_WIDTH - 1, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 Invalid modifier/missing DRM_MODE_FB_MODIFIERS flag",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 different modifier per-plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0, 0 },
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "NV12 with DRM_FORMAT_MOD_SAMSUNG_64_32_TILE",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+ DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0 },
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 Valid modifiers without DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+ DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, 0 },
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 Modifier for inexistent plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { DRM_FORMAT_MOD_SAMSUNG_64_32_TILE, DRM_FORMAT_MOD_SAMSUNG_64_32_TILE,
+ DRM_FORMAT_MOD_SAMSUNG_64_32_TILE },
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 0, .name = "NV12 Handle for inexistent plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .pitches = { MAX_WIDTH, MAX_WIDTH, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "NV12 Handle for inexistent plane without DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_NV12,
+ .handles = { 1, 1, 1 }, .pitches = { 600, 600, 600 },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 Normal sizes",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .pitches = { 600, 300, 300 },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 DRM_MODE_FB_MODIFIERS set without modifier",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .pitches = { 600, 300, 300 },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 Max sizes",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2),
+ DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 0, .name = "YVU420 Invalid pitch",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) - 1,
+ DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 Different pitches",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) + 1,
+ DIV_ROUND_UP(MAX_WIDTH, 2) + 7 },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 Different buffer offsets/pitches",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .offsets = { MAX_WIDTH, MAX_WIDTH +
+ MAX_WIDTH * MAX_HEIGHT, MAX_WIDTH + 2 * MAX_WIDTH * MAX_HEIGHT },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2) + 1,
+ DIV_ROUND_UP(MAX_WIDTH, 2) + 7 },
+ }
+},
+{ .buffer_created = 0,
+ .name = "YVU420 Modifier set just for plane 0, without DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 0,
+ .name = "YVU420 Modifier set just for planes 0, 1, without DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 },
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 0,
+ .name = "YVU420 Modifier set just for plane 0, 1, with DRM_MODE_FB_MODIFIERS",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 1, .name = "YVU420 Valid modifier",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE,
+ AFBC_FORMAT_MOD_SPARSE },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 0, .name = "YVU420 Different modifiers per plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE | AFBC_FORMAT_MOD_YTR,
+ AFBC_FORMAT_MOD_SPARSE },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 0, .name = "YVU420 Modifier for inexistent plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_YVU420,
+ .handles = { 1, 1, 1 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE,
+ AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE },
+ .pitches = { MAX_WIDTH, DIV_ROUND_UP(MAX_WIDTH, 2), DIV_ROUND_UP(MAX_WIDTH, 2) },
+ }
+},
+{ .buffer_created = 1, .name = "X0L2 Normal sizes",
+ .cmd = { .width = 600, .height = 600, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 1200, 0, 0 }
+ }
+},
+{ .buffer_created = 1, .name = "X0L2 Max sizes",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH, 0, 0 }
+ }
+},
+{ .buffer_created = 0, .name = "X0L2 Invalid pitch",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH - 1, 0, 0 }
+ }
+},
+{ .buffer_created = 1, .name = "X0L2 Pitch greater than minimum required",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
+ }
+},
+{ .buffer_created = 0, .name = "X0L2 Handle for inexistent plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 1, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ .pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
+ }
+},
+{ .buffer_created = 1,
+ .name = "X0L2 Offset for inexistent plane, without DRM_MODE_FB_MODIFIERS set",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .offsets = { 0, 0, 3 },
+ .pitches = { 2 * MAX_WIDTH + 1, 0, 0 }
+ }
+},
+{ .buffer_created = 0, .name = "X0L2 Modifier without DRM_MODE_FB_MODIFIERS set",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH + 1, 0, 0 },
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 },
+ }
+},
+{ .buffer_created = 1, .name = "X0L2 Valid modifier",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT, .pixel_format = DRM_FORMAT_X0L2,
+ .handles = { 1, 0, 0 }, .pitches = { 2 * MAX_WIDTH + 1, 0, 0 },
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, 0, 0 }, .flags = DRM_MODE_FB_MODIFIERS,
+ }
+},
+{ .buffer_created = 0, .name = "X0L2 Modifier for inexistent plane",
+ .cmd = { .width = MAX_WIDTH, .height = MAX_HEIGHT,
+ .pixel_format = DRM_FORMAT_X0L2, .handles = { 1, 0, 0 },
+ .pitches = { 2 * MAX_WIDTH + 1, 0, 0 },
+ .modifier = { AFBC_FORMAT_MOD_SPARSE, AFBC_FORMAT_MOD_SPARSE, 0 },
+ .flags = DRM_MODE_FB_MODIFIERS,
+ }
+},
+};
+
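+/* Mock &drm_mode_config_funcs.fb_create: record that the request made it
+ * through the core checks to the driver hook, then return an error so that
+ * no real framebuffer object has to be created or tracked by the test.
+ */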
+static struct drm_framebuffer *fb_create_mock(struct drm_device *dev,
+ struct drm_file *file_priv,
+ const struct drm_mode_fb_cmd2 *mode_cmd)
+{
+ int *buffer_created = dev->dev_private;
+
+ *buffer_created = 1;
+ return ERR_PTR(-EINVAL);
+}
+
+static struct drm_mode_config_funcs mock_config_funcs = {
+ .fb_create = fb_create_mock,
+};
+
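+/* Build a minimal mock &drm_device with just enough mode_config state
+ * (size limits and the mocked .fb_create hook) for framebuffer creation.
+ */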
+static int drm_framebuffer_test_init(struct kunit *test)
+{
+ struct drm_device *mock;
+
+ mock = kunit_kzalloc(test, sizeof(*mock), GFP_KERNEL);
+ KUNIT_ASSERT_NOT_ERR_OR_NULL(test, mock);
+
+ mock->mode_config.min_width = MIN_WIDTH;
+ mock->mode_config.max_width = MAX_WIDTH;
+ mock->mode_config.min_height = MIN_HEIGHT;
+ mock->mode_config.max_height = MAX_HEIGHT;
+ mock->mode_config.funcs = &mock_config_funcs;
+
+ test->priv = mock;
+ return 0;
+}
+
+static void drm_test_framebuffer_create(struct kunit *test)
+{
+ const struct drm_framebuffer_test *params = test->param_value;
+ struct drm_device *mock = test->priv;
+ int buffer_created = 0;
+
+ mock->dev_private = &buffer_created;
+ drm_internal_framebuffer_create(mock, &params->cmd, NULL);
+ KUNIT_EXPECT_EQ(test, params->buffer_created, buffer_created);
+}
+
+static void drm_framebuffer_test_to_desc(const struct drm_framebuffer_test *t, char *desc)
+{
+ strcpy(desc, t->name);
+}
+
+KUNIT_ARRAY_PARAM(drm_framebuffer_create, drm_framebuffer_create_cases,
+ drm_framebuffer_test_to_desc);
+
+static struct kunit_case drm_framebuffer_tests[] = {
+ KUNIT_CASE_PARAM(drm_test_framebuffer_create, drm_framebuffer_create_gen_params),
+ { }
+};
+
+static struct kunit_suite drm_framebuffer_test_suite = {
+ .name = "drm_framebuffer",
+ .init = drm_framebuffer_test_init,
+ .test_cases = drm_framebuffer_tests,
+};
+
+kunit_test_suite(drm_framebuffer_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_mm_test.c b/drivers/gpu/drm/tests/drm_mm_test.c
new file mode 100644
index 000000000..13fa4a18a
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_mm_test.c
@@ -0,0 +1,2256 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Test cases for the drm_mm range manager
+ *
+ * Copyright (c) 2022 Arthur Grillo <arthur.grillo@usp.br>
+ */
+
+#include <kunit/test.h>
+
+#include <linux/prime_numbers.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/vmalloc.h>
+#include <linux/ktime.h>
+
+#include <drm/drm_mm.h>
+
+#include "../lib/drm_random.h"
+
+static unsigned int random_seed;
+static unsigned int max_iterations = 8192;
+static unsigned int max_prime = 128;
+
+enum {
+ BEST,
+ BOTTOMUP,
+ TOPDOWN,
+ EVICT,
+};
+
+static const struct insert_mode {
+ const char *name;
+ enum drm_mm_insert_mode mode;
+} insert_modes[] = {
+ [BEST] = { "best", DRM_MM_INSERT_BEST },
+ [BOTTOMUP] = { "bottom-up", DRM_MM_INSERT_LOW },
+ [TOPDOWN] = { "top-down", DRM_MM_INSERT_HIGH },
+ [EVICT] = { "evict", DRM_MM_INSERT_EVICT },
+ {}
+}, evict_modes[] = {
+ { "bottom-up", DRM_MM_INSERT_LOW },
+ { "top-down", DRM_MM_INSERT_HIGH },
+ {}
+};
+
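+/* Check that the drm_mm has no free space left: neither the hole list nor
+ * any node's hole-follows flag may report a hole.
+ */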
+static bool assert_no_holes(struct kunit *test, const struct drm_mm *mm)
+{
+ struct drm_mm_node *hole;
+ u64 hole_start, __always_unused hole_end;
+ unsigned long count;
+
+ count = 0;
+ drm_mm_for_each_hole(hole, mm, hole_start, hole_end)
+ count++;
+ if (count) {
+ KUNIT_FAIL(test,
+ "Expected to find no holes (after reserve), found %lu instead\n", count);
+ return false;
+ }
+
+ drm_mm_for_each_node(hole, mm) {
+ if (drm_mm_hole_follows(hole)) {
+ KUNIT_FAIL(test, "Hole follows node, expected none!\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
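+/* Check that the drm_mm contains exactly one hole and that it spans [start, end). */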
+static bool assert_one_hole(struct kunit *test, const struct drm_mm *mm, u64 start, u64 end)
+{
+ struct drm_mm_node *hole;
+ u64 hole_start, hole_end;
+ unsigned long count;
+ bool ok = true;
+
+ if (end <= start)
+ return true;
+
+ count = 0;
+ drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+ if (start != hole_start || end != hole_end) {
+ if (ok)
+ KUNIT_FAIL(test,
+ "empty mm has incorrect hole, found (%llx, %llx), expect (%llx, %llx)\n",
+ hole_start, hole_end, start, end);
+ ok = false;
+ }
+ count++;
+ }
+ if (count != 1) {
+ KUNIT_FAIL(test, "Expected to find one hole, found %lu instead\n", count);
+ ok = false;
+ }
+
+ return ok;
+}
+
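+/* Check that the drm_mm is fully populated by equally sized nodes laid out
+ * back-to-back from offset 0, and that range lookups find each of them.
+ */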
+static bool assert_continuous(struct kunit *test, const struct drm_mm *mm, u64 size)
+{
+ struct drm_mm_node *node, *check, *found;
+ unsigned long n;
+ u64 addr;
+
+ if (!assert_no_holes(test, mm))
+ return false;
+
+ n = 0;
+ addr = 0;
+ drm_mm_for_each_node(node, mm) {
+ if (node->start != addr) {
+ KUNIT_FAIL(test, "node[%ld] list out of order, expected %llx found %llx\n",
+ n, addr, node->start);
+ return false;
+ }
+
+ if (node->size != size) {
+ KUNIT_FAIL(test, "node[%ld].size incorrect, expected %llx, found %llx\n",
+ n, size, node->size);
+ return false;
+ }
+
+ if (drm_mm_hole_follows(node)) {
+ KUNIT_FAIL(test, "node[%ld] is followed by a hole!\n", n);
+ return false;
+ }
+
+ found = NULL;
+ drm_mm_for_each_node_in_range(check, mm, addr, addr + size) {
+ if (node != check) {
+ KUNIT_FAIL(test,
+ "lookup return wrong node, expected start %llx, found %llx\n",
+ node->start, check->start);
+ return false;
+ }
+ found = check;
+ }
+ if (!found) {
+ KUNIT_FAIL(test, "lookup failed for node %llx + %llx\n", addr, size);
+ return false;
+ }
+
+ addr += size;
+ n++;
+ }
+
+ return true;
+}
+
+static u64 misalignment(struct drm_mm_node *node, u64 alignment)
+{
+ u64 rem;
+
+ if (!alignment)
+ return 0;
+
+ div64_u64_rem(node->start, alignment, &rem);
+ return rem;
+}
+
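+/* Check that an inserted node has the requested size, alignment and color. */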
+static bool assert_node(struct kunit *test, struct drm_mm_node *node, struct drm_mm *mm,
+ u64 size, u64 alignment, unsigned long color)
+{
+ bool ok = true;
+
+ if (!drm_mm_node_allocated(node) || node->mm != mm) {
+ KUNIT_FAIL(test, "node not allocated\n");
+ ok = false;
+ }
+
+ if (node->size != size) {
+ KUNIT_FAIL(test, "node has wrong size, found %llu, expected %llu\n",
+ node->size, size);
+ ok = false;
+ }
+
+ if (misalignment(node, alignment)) {
+ KUNIT_FAIL(test,
+ "node is misaligned, start %llx rem %llu, expected alignment %llu\n",
+ node->start, misalignment(node, alignment), alignment);
+ ok = false;
+ }
+
+ if (node->color != color) {
+ KUNIT_FAIL(test, "node has wrong color, found %lu, expected %lu\n",
+ node->color, color);
+ ok = false;
+ }
+
+ return ok;
+}
+
+static void drm_test_mm_init(struct kunit *test)
+{
+ const unsigned int size = 4096;
+ struct drm_mm mm;
+ struct drm_mm_node tmp;
+
+ /* Start with some simple checks on initialising the struct drm_mm */
+ memset(&mm, 0, sizeof(mm));
+ KUNIT_ASSERT_FALSE_MSG(test, drm_mm_initialized(&mm),
+ "zeroed mm claims to be initialized\n");
+
+ memset(&mm, 0xff, sizeof(mm));
+ drm_mm_init(&mm, 0, size);
+ if (!drm_mm_initialized(&mm)) {
+ KUNIT_FAIL(test, "mm claims not to be initialized\n");
+ goto out;
+ }
+
+ if (!drm_mm_clean(&mm)) {
+ KUNIT_FAIL(test, "mm not empty on creation\n");
+ goto out;
+ }
+
+ /* After creation, it should all be one massive hole */
+ if (!assert_one_hole(test, &mm, 0, size)) {
+ KUNIT_FAIL(test, "");
+ goto out;
+ }
+
+ memset(&tmp, 0, sizeof(tmp));
+ tmp.start = 0;
+ tmp.size = size;
+ if (drm_mm_reserve_node(&mm, &tmp)) {
+ KUNIT_FAIL(test, "failed to reserve whole drm_mm\n");
+ goto out;
+ }
+
+ /* After filling the range entirely, there should be no holes */
+ if (!assert_no_holes(test, &mm)) {
+ KUNIT_FAIL(test, "");
+ goto out;
+ }
+
+ /* And then after emptying it again, the massive hole should be back */
+ drm_mm_remove_node(&tmp);
+ if (!assert_one_hole(test, &mm, 0, size)) {
+ KUNIT_FAIL(test, "");
+ goto out;
+ }
+
+out:
+ drm_mm_takedown(&mm);
+}
+
+static void drm_test_mm_debug(struct kunit *test)
+{
+ struct drm_mm mm;
+ struct drm_mm_node nodes[2];
+
+ /* Create a small drm_mm with a couple of nodes and a few holes, and
+ * check that the debug iterator doesn't explode over a trivial drm_mm.
+ */
+
+ drm_mm_init(&mm, 0, 4096);
+
+ memset(nodes, 0, sizeof(nodes));
+ nodes[0].start = 512;
+ nodes[0].size = 1024;
+ KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[0]),
+ "failed to reserve node[0] {start=%lld, size=%lld)\n",
+ nodes[0].start, nodes[0].size);
+
+ nodes[1].size = 1024;
+ nodes[1].start = 4096 - 512 - nodes[1].size;
+ KUNIT_ASSERT_FALSE_MSG(test, drm_mm_reserve_node(&mm, &nodes[1]),
+ "failed to reserve node[0] {start=%lld, size=%lld)\n",
+ nodes[0].start, nodes[0].size);
+}
+
+static struct drm_mm_node *set_node(struct drm_mm_node *node,
+ u64 start, u64 size)
+{
+ node->start = start;
+ node->size = size;
+ return node;
+}
+
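+/* Reserving this node is expected to fail with -ENOSPC; success or any other
+ * error is reported as a test failure.
+ */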
+static bool expect_reserve_fail(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node)
+{
+ int err;
+
+ err = drm_mm_reserve_node(mm, node);
+ if (likely(err == -ENOSPC))
+ return true;
+
+ if (!err) {
+ KUNIT_FAIL(test, "impossible reserve succeeded, node %llu + %llu\n",
+ node->start, node->size);
+ drm_mm_remove_node(node);
+ } else {
+ KUNIT_FAIL(test,
+ "impossible reserve failed with wrong error %d [expected %d], node %llu + %llu\n",
+ err, -ENOSPC, node->start, node->size);
+ }
+ return false;
+}
+
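+/* Try a table of out-of-range and degenerate reservations around the managed
+ * [0, count * size) range; every one of them must be rejected.
+ */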
+static bool check_reserve_boundaries(struct kunit *test, struct drm_mm *mm,
+ unsigned int count,
+ u64 size)
+{
+ const struct boundary {
+ u64 start, size;
+ const char *name;
+ } boundaries[] = {
+#define B(st, sz) { (st), (sz), "{ " #st ", " #sz "}" }
+ B(0, 0),
+ B(-size, 0),
+ B(size, 0),
+ B(size * count, 0),
+ B(-size, size),
+ B(-size, -size),
+ B(-size, 2 * size),
+ B(0, -size),
+ B(size, -size),
+ B(count * size, size),
+ B(count * size, -size),
+ B(count * size, count * size),
+ B(count * size, -count * size),
+ B(count * size, -(count + 1) * size),
+ B((count + 1) * size, size),
+ B((count + 1) * size, -size),
+ B((count + 1) * size, -2 * size),
+#undef B
+ };
+ struct drm_mm_node tmp = {};
+ int n;
+
+ for (n = 0; n < ARRAY_SIZE(boundaries); n++) {
+ if (!expect_reserve_fail(test, mm, set_node(&tmp, boundaries[n].start,
+ boundaries[n].size))) {
+ KUNIT_FAIL(test, "boundary[%d:%s] failed, count=%u, size=%lld\n",
+ n, boundaries[n].name, count, size);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int __drm_test_mm_reserve(struct kunit *test, unsigned int count, u64 size)
+{
+ DRM_RND_STATE(prng, random_seed);
+ struct drm_mm mm;
+ struct drm_mm_node tmp, *nodes, *node, *next;
+ unsigned int *order, n, m, o = 0;
+ int ret, err;
+
+ /* For exercising drm_mm_reserve_node(), we want to check that
+ * reservations outside of the drm_mm range are rejected, as are
+ * overlapping and otherwise already occupied ranges. Afterwards,
+ * the tree and nodes should be intact.
+ */
+
+ DRM_MM_BUG_ON(!count);
+ DRM_MM_BUG_ON(!size);
+
+ ret = -ENOMEM;
+ order = drm_random_order(count, &prng);
+ if (!order)
+ goto err;
+
+ nodes = vzalloc(array_size(count, sizeof(*nodes)));
+ KUNIT_ASSERT_TRUE(test, nodes);
+
+ ret = -EINVAL;
+ drm_mm_init(&mm, 0, count * size);
+
+ if (!check_reserve_boundaries(test, &mm, count, size))
+ goto out;
+
+ for (n = 0; n < count; n++) {
+ nodes[n].start = order[n] * size;
+ nodes[n].size = size;
+
+ err = drm_mm_reserve_node(&mm, &nodes[n]);
+ if (err) {
+ KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n",
+ n, nodes[n].start);
+ ret = err;
+ goto out;
+ }
+
+ if (!drm_mm_node_allocated(&nodes[n])) {
+ KUNIT_FAIL(test, "reserved node not allocated! step %d, start %llu\n",
+ n, nodes[n].start);
+ goto out;
+ }
+
+ if (!expect_reserve_fail(test, &mm, &nodes[n]))
+ goto out;
+ }
+
+ /* After random insertion the nodes should be in order */
+ if (!assert_continuous(test, &mm, size))
+ goto out;
+
+ /* Repeated use should then fail */
+ drm_random_reorder(order, count, &prng);
+ for (n = 0; n < count; n++) {
+ if (!expect_reserve_fail(test, &mm, set_node(&tmp, order[n] * size, 1)))
+ goto out;
+
+ /* Remove and reinsert should work */
+ drm_mm_remove_node(&nodes[order[n]]);
+ err = drm_mm_reserve_node(&mm, &nodes[order[n]]);
+ if (err) {
+ KUNIT_FAIL(test, "reserve failed, step %d, start %llu\n",
+ n, nodes[n].start);
+ ret = err;
+ goto out;
+ }
+ }
+
+ if (!assert_continuous(test, &mm, size))
+ goto out;
+
+ /* Overlapping use should then fail */
+ for (n = 0; n < count; n++) {
+ if (!expect_reserve_fail(test, &mm, set_node(&tmp, 0, size * count)))
+ goto out;
+ }
+ for (n = 0; n < count; n++) {
+ if (!expect_reserve_fail(test, &mm, set_node(&tmp, size * n, size * (count - n))))
+ goto out;
+ }
+
+ /* Remove several, reinsert, check full */
+ for_each_prime_number(n, min(max_prime, count)) {
+ for (m = 0; m < n; m++) {
+ node = &nodes[order[(o + m) % count]];
+ drm_mm_remove_node(node);
+ }
+
+ for (m = 0; m < n; m++) {
+ node = &nodes[order[(o + m) % count]];
+ err = drm_mm_reserve_node(&mm, node);
+ if (err) {
+ KUNIT_FAIL(test, "reserve failed, step %d/%d, start %llu\n",
+ m, n, node->start);
+ ret = err;
+ goto out;
+ }
+ }
+
+ o += n;
+
+ if (!assert_continuous(test, &mm, size))
+ goto out;
+ }
+
+ ret = 0;
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ vfree(nodes);
+ kfree(order);
+err:
+ return ret;
+}
+
+static void drm_test_mm_reserve(struct kunit *test)
+{
+ const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
+ int n;
+
+ for_each_prime_number_from(n, 1, 54) {
+ u64 size = BIT_ULL(n);
+
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size - 1));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_reserve(test, count, size + 1));
+
+ cond_resched();
+ }
+}
+
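+/* Insert a node with the given size, alignment, color and mode, and verify
+ * the resulting placement.
+ */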
+static bool expect_insert(struct kunit *test, struct drm_mm *mm,
+ struct drm_mm_node *node, u64 size, u64 alignment, unsigned long color,
+ const struct insert_mode *mode)
+{
+ int err;
+
+ err = drm_mm_insert_node_generic(mm, node,
+ size, alignment, color,
+ mode->mode);
+ if (err) {
+ KUNIT_FAIL(test,
+ "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) failed with err=%d\n",
+ size, alignment, color, mode->name, err);
+ return false;
+ }
+
+ if (!assert_node(test, node, mm, size, alignment, color)) {
+ drm_mm_remove_node(node);
+ return false;
+ }
+
+ return true;
+}
+
+static bool expect_insert_fail(struct kunit *test, struct drm_mm *mm, u64 size)
+{
+ struct drm_mm_node tmp = {};
+ int err;
+
+ err = drm_mm_insert_node(mm, &tmp, size);
+ if (likely(err == -ENOSPC))
+ return true;
+
+ if (!err) {
+ KUNIT_FAIL(test, "impossible insert succeeded, node %llu + %llu\n",
+ tmp.start, tmp.size);
+ drm_mm_remove_node(&tmp);
+ } else {
+ KUNIT_FAIL(test,
+ "impossible insert failed with wrong error %d [expected %d], size %llu\n",
+ err, -ENOSPC, size);
+ }
+ return false;
+}
+
+static int __drm_test_mm_insert(struct kunit *test, unsigned int count, u64 size, bool replace)
+{
+ DRM_RND_STATE(prng, random_seed);
+ const struct insert_mode *mode;
+ struct drm_mm mm;
+ struct drm_mm_node *nodes, *node, *next;
+ unsigned int *order, n, m, o = 0;
+ int ret;
+
+ /* Fill a range with lots of nodes, check it doesn't fail too early */
+
+ DRM_MM_BUG_ON(!count);
+ DRM_MM_BUG_ON(!size);
+
+ ret = -ENOMEM;
+ nodes = vmalloc(array_size(count, sizeof(*nodes)));
+ KUNIT_ASSERT_TRUE(test, nodes);
+
+ order = drm_random_order(count, &prng);
+ if (!order)
+ goto err_nodes;
+
+ ret = -EINVAL;
+ drm_mm_init(&mm, 0, count * size);
+
+ for (mode = insert_modes; mode->name; mode++) {
+ for (n = 0; n < count; n++) {
+ struct drm_mm_node tmp;
+
+ node = replace ? &tmp : &nodes[n];
+ memset(node, 0, sizeof(*node));
+ if (!expect_insert(test, &mm, node, size, 0, n, mode)) {
+ KUNIT_FAIL(test, "%s insert failed, size %llu step %d\n",
+ mode->name, size, n);
+ goto out;
+ }
+
+ if (replace) {
+ drm_mm_replace_node(&tmp, &nodes[n]);
+ if (drm_mm_node_allocated(&tmp)) {
+ KUNIT_FAIL(test,
+ "replaced old-node still allocated! step %d\n",
+ n);
+ goto out;
+ }
+
+ if (!assert_node(test, &nodes[n], &mm, size, 0, n)) {
+ KUNIT_FAIL(test,
+ "replaced node did not inherit parameters, size %llu step %d\n",
+ size, n);
+ goto out;
+ }
+
+ if (tmp.start != nodes[n].start) {
+ KUNIT_FAIL(test,
+ "replaced node mismatch location expected [%llx + %llx], found [%llx + %llx]\n",
+ tmp.start, size, nodes[n].start, nodes[n].size);
+ goto out;
+ }
+ }
+ }
+
+ /* After random insertion the nodes should be in order */
+ if (!assert_continuous(test, &mm, size))
+ goto out;
+
+ /* Repeated use should then fail */
+ if (!expect_insert_fail(test, &mm, size))
+ goto out;
+
+ /* Remove one and reinsert, as the only hole it should refill itself */
+ for (n = 0; n < count; n++) {
+ u64 addr = nodes[n].start;
+
+ drm_mm_remove_node(&nodes[n]);
+ if (!expect_insert(test, &mm, &nodes[n], size, 0, n, mode)) {
+ KUNIT_FAIL(test, "%s reinsert failed, size %llu step %d\n",
+ mode->name, size, n);
+ goto out;
+ }
+
+ if (nodes[n].start != addr) {
+ KUNIT_FAIL(test,
+ "%s reinsert node moved, step %d, expected %llx, found %llx\n",
+ mode->name, n, addr, nodes[n].start);
+ goto out;
+ }
+
+ if (!assert_continuous(test, &mm, size))
+ goto out;
+ }
+
+ /* Remove several, reinsert, check full */
+ for_each_prime_number(n, min(max_prime, count)) {
+ for (m = 0; m < n; m++) {
+ node = &nodes[order[(o + m) % count]];
+ drm_mm_remove_node(node);
+ }
+
+ for (m = 0; m < n; m++) {
+ node = &nodes[order[(o + m) % count]];
+ if (!expect_insert(test, &mm, node, size, 0, n, mode)) {
+ KUNIT_FAIL(test,
+ "%s multiple reinsert failed, size %llu step %d\n",
+ mode->name, size, n);
+ goto out;
+ }
+ }
+
+ o += n;
+
+ if (!assert_continuous(test, &mm, size))
+ goto out;
+
+ if (!expect_insert_fail(test, &mm, size))
+ goto out;
+ }
+
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ DRM_MM_BUG_ON(!drm_mm_clean(&mm));
+
+ cond_resched();
+ }
+
+ ret = 0;
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ kfree(order);
+err_nodes:
+ vfree(nodes);
+ return ret;
+}
+
+static void drm_test_mm_insert(struct kunit *test)
+{
+ const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
+ unsigned int n;
+
+ for_each_prime_number_from(n, 1, 54) {
+ u64 size = BIT_ULL(n);
+
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, false));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, false));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, false));
+
+ cond_resched();
+ }
+}
+
+static void drm_test_mm_replace(struct kunit *test)
+{
+ const unsigned int count = min_t(unsigned int, BIT(10), max_iterations);
+ unsigned int n;
+
+ /* Reuse __drm_test_mm_insert to exercise replacement by inserting a dummy node,
+ * then replacing it with the intended node. We want to check that
+ * the tree is intact and all the information we need is carried
+ * across to the target node.
+ */
+
+ for_each_prime_number_from(n, 1, 54) {
+ u64 size = BIT_ULL(n);
+
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size - 1, true));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size, true));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert(test, count, size + 1, true));
+
+ cond_resched();
+ }
+}
+
+static bool expect_insert_in_range(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *node,
+ u64 size, u64 alignment, unsigned long color,
+ u64 range_start, u64 range_end, const struct insert_mode *mode)
+{
+ int err;
+
+ err = drm_mm_insert_node_in_range(mm, node,
+ size, alignment, color,
+ range_start, range_end,
+ mode->mode);
+ if (err) {
+ KUNIT_FAIL(test,
+ "insert (size=%llu, alignment=%llu, color=%lu, mode=%s) nto range [%llx, %llx] failed with err=%d\n",
+ size, alignment, color, mode->name,
+ range_start, range_end, err);
+ return false;
+ }
+
+ if (!assert_node(test, node, mm, size, alignment, color)) {
+ drm_mm_remove_node(node);
+ return false;
+ }
+
+ return true;
+}
+
+static bool expect_insert_in_range_fail(struct kunit *test, struct drm_mm *mm,
+ u64 size, u64 range_start, u64 range_end)
+{
+ struct drm_mm_node tmp = {};
+ int err;
+
+ err = drm_mm_insert_node_in_range(mm, &tmp, size, 0, 0, range_start, range_end,
+ 0);
+ if (likely(err == -ENOSPC))
+ return true;
+
+ if (!err) {
+ KUNIT_FAIL(test,
+ "impossible insert succeeded, node %llx + %llu, range [%llx, %llx]\n",
+ tmp.start, tmp.size, range_start, range_end);
+ drm_mm_remove_node(&tmp);
+ } else {
+ KUNIT_FAIL(test,
+ "impossible insert failed with wrong error %d [expected %d], size %llu, range [%llx, %llx]\n",
+ err, -ENOSPC, size, range_start, range_end);
+ }
+
+ return false;
+}
+
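+/* After verifying that one more insertion no longer fits, check that
+ * [start, end) is completely filled with size-sized nodes in order and that
+ * nothing was placed outside the range.
+ */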
+static bool assert_contiguous_in_range(struct kunit *test, struct drm_mm *mm,
+ u64 size, u64 start, u64 end)
+{
+ struct drm_mm_node *node;
+ unsigned int n;
+
+ if (!expect_insert_in_range_fail(test, mm, size, start, end))
+ return false;
+
+ n = div64_u64(start + size - 1, size);
+ drm_mm_for_each_node(node, mm) {
+ if (node->start < start || node->start + node->size > end) {
+ KUNIT_FAIL(test,
+ "node %d out of range, address [%llx + %llu], range [%llx, %llx]\n",
+ n, node->start, node->start + node->size, start, end);
+ return false;
+ }
+
+ if (node->start != n * size) {
+ KUNIT_FAIL(test, "node %d out of order, expected start %llx, found %llx\n",
+ n, n * size, node->start);
+ return false;
+ }
+
+ if (node->size != size) {
+ KUNIT_FAIL(test, "node %d has wrong size, expected size %llx, found %llx\n",
+ n, size, node->size);
+ return false;
+ }
+
+ if (drm_mm_hole_follows(node) && drm_mm_hole_node_end(node) < end) {
+ KUNIT_FAIL(test, "node %d is followed by a hole!\n", n);
+ return false;
+ }
+
+ n++;
+ }
+
+ if (start > 0) {
+ node = __drm_mm_interval_first(mm, 0, start - 1);
+ if (drm_mm_node_allocated(node)) {
+ KUNIT_FAIL(test, "node before start: node=%llx+%llu, start=%llx\n",
+ node->start, node->size, start);
+ return false;
+ }
+ }
+
+ if (end < U64_MAX) {
+ node = __drm_mm_interval_first(mm, end, U64_MAX);
+ if (drm_mm_node_allocated(node)) {
+ KUNIT_FAIL(test, "node after end: node=%llx+%llu, end=%llx\n",
+ node->start, node->size, end);
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static int __drm_test_mm_insert_range(struct kunit *test, unsigned int count, u64 size,
+ u64 start, u64 end)
+{
+ const struct insert_mode *mode;
+ struct drm_mm mm;
+ struct drm_mm_node *nodes, *node, *next;
+ unsigned int n, start_n, end_n;
+ int ret;
+
+ DRM_MM_BUG_ON(!count);
+ DRM_MM_BUG_ON(!size);
+ DRM_MM_BUG_ON(end <= start);
+
+ /* Very similar to __drm_test_mm_insert(), but now instead of populating the
+ * full range of the drm_mm, we try to fill a small portion of it.
+ */
+
+ ret = -ENOMEM;
+ nodes = vzalloc(array_size(count, sizeof(*nodes)));
+ KUNIT_ASSERT_TRUE(test, nodes);
+
+ ret = -EINVAL;
+ drm_mm_init(&mm, 0, count * size);
+
+ start_n = div64_u64(start + size - 1, size);
+ end_n = div64_u64(end - size, size);
+
+ for (mode = insert_modes; mode->name; mode++) {
+ for (n = start_n; n <= end_n; n++) {
+ if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n,
+ start, end, mode)) {
+ KUNIT_FAIL(test,
+ "%s insert failed, size %llu, step %d [%d, %d], range [%llx, %llx]\n",
+ mode->name, size, n, start_n, end_n, start, end);
+ goto out;
+ }
+ }
+
+ if (!assert_contiguous_in_range(test, &mm, size, start, end)) {
+ KUNIT_FAIL(test,
+ "%s: range [%llx, %llx] not full after initialisation, size=%llu\n",
+ mode->name, start, end, size);
+ goto out;
+ }
+
+ /* Remove one and reinsert, it should refill itself */
+ for (n = start_n; n <= end_n; n++) {
+ u64 addr = nodes[n].start;
+
+ drm_mm_remove_node(&nodes[n]);
+ if (!expect_insert_in_range(test, &mm, &nodes[n], size, size, n,
+ start, end, mode)) {
+ KUNIT_FAIL(test, "%s reinsert failed, step %d\n", mode->name, n);
+ goto out;
+ }
+
+ if (nodes[n].start != addr) {
+ KUNIT_FAIL(test,
+ "%s reinsert node moved, step %d, expected %llx, found %llx\n",
+ mode->name, n, addr, nodes[n].start);
+ goto out;
+ }
+ }
+
+ if (!assert_contiguous_in_range(test, &mm, size, start, end)) {
+ KUNIT_FAIL(test,
+ "%s: range [%llx, %llx] not full after reinsertion, size=%llu\n",
+ mode->name, start, end, size);
+ goto out;
+ }
+
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ DRM_MM_BUG_ON(!drm_mm_clean(&mm));
+
+ cond_resched();
+ }
+
+ ret = 0;
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ vfree(nodes);
+ return ret;
+}
+
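+/* Check that insertions which fall outside, or straddle the boundaries of,
+ * the managed [start, end) range are rejected.
+ */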
+static int insert_outside_range(struct kunit *test)
+{
+ struct drm_mm mm;
+ const unsigned int start = 1024;
+ const unsigned int end = 2048;
+ const unsigned int size = end - start;
+
+ drm_mm_init(&mm, start, size);
+
+ if (!expect_insert_in_range_fail(test, &mm, 1, 0, start))
+ return -EINVAL;
+
+ if (!expect_insert_in_range_fail(test, &mm, size,
+ start - size / 2, start + (size + 1) / 2))
+ return -EINVAL;
+
+ if (!expect_insert_in_range_fail(test, &mm, size,
+ end - (size + 1) / 2, end + size / 2))
+ return -EINVAL;
+
+ if (!expect_insert_in_range_fail(test, &mm, 1, end, end + size))
+ return -EINVAL;
+
+ drm_mm_takedown(&mm);
+ return 0;
+}
+
+static void drm_test_mm_insert_range(struct kunit *test)
+{
+ const unsigned int count = min_t(unsigned int, BIT(13), max_iterations);
+ unsigned int n;
+
+ /* Check that requests outside the bounds of drm_mm are rejected. */
+ KUNIT_ASSERT_FALSE(test, insert_outside_range(test));
+
+ for_each_prime_number_from(n, 1, 50) {
+ const u64 size = BIT_ULL(n);
+ const u64 max = count * size;
+
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 1, max));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max - 1));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size, 0, max / 2));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
+ max / 2, max));
+ KUNIT_ASSERT_FALSE(test, __drm_test_mm_insert_range(test, count, size,
+ max / 4 + 1, 3 * max / 4 - 1));
+
+ cond_resched();
+ }
+}
+
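+/* Fill the start of the address space with small nodes, then punch holes by
+ * freeing every other one, leaving a heavily fragmented layout.
+ */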
+static int prepare_frag(struct kunit *test, struct drm_mm *mm, struct drm_mm_node *nodes,
+ unsigned int num_insert, const struct insert_mode *mode)
+{
+ unsigned int size = 4096;
+ unsigned int i;
+
+ for (i = 0; i < num_insert; i++) {
+ if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode)) {
+ KUNIT_FAIL(test, "%s insert failed\n", mode->name);
+ return -EINVAL;
+ }
+ }
+
+ /* introduce fragmentation by freeing every other node */
+ for (i = 0; i < num_insert; i++) {
+ if (i % 2 == 0)
+ drm_mm_remove_node(&nodes[i]);
+ }
+
+ return 0;
+}
+
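+/* Time num_insert insertions into the (fragmented) drm_mm and return the
+ * elapsed time in nanoseconds, or 0 on failure.
+ */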
+static u64 get_insert_time(struct kunit *test, struct drm_mm *mm,
+ unsigned int num_insert, struct drm_mm_node *nodes,
+ const struct insert_mode *mode)
+{
+ unsigned int size = 8192;
+ ktime_t start;
+ unsigned int i;
+
+ start = ktime_get();
+ for (i = 0; i < num_insert; i++) {
+ if (!expect_insert(test, mm, &nodes[i], size, 0, i, mode)) {
+ KUNIT_FAIL(test, "%s insert failed\n", mode->name);
+ return 0;
+ }
+ }
+
+ return ktime_to_ns(ktime_sub(ktime_get(), start));
+}
+
+static void drm_test_mm_frag(struct kunit *test)
+{
+ struct drm_mm mm;
+ const struct insert_mode *mode;
+ struct drm_mm_node *nodes, *node, *next;
+ unsigned int insert_size = 10000;
+ unsigned int scale_factor = 4;
+
+ /* We need 4 * insert_size nodes to hold the intermediate allocated
+ * drm_mm nodes:
+ * 1 * insert_size for prepare_frag(),
+ * 1 * insert_size for the first get_insert_time() pass,
+ * 2 * insert_size for the second get_insert_time() pass.
+ */
+ nodes = vzalloc(array_size(insert_size * 4, sizeof(*nodes)));
+ KUNIT_ASSERT_TRUE(test, nodes);
+
+ /* For BOTTOMUP and TOPDOWN, we first fragment the
+ * address space using prepare_frag() and then check that doubling
+ * the number of insertions (10k to 20k) takes no more than
+ * scale_factor (4x) as long, i.e. that insertion time does not
+ * degrade worse than quadratically on a fragmented address space.
+ */
+ drm_mm_init(&mm, 1, U64_MAX - 2);
+ for (mode = insert_modes; mode->name; mode++) {
+ u64 insert_time1, insert_time2;
+
+ if (mode->mode != DRM_MM_INSERT_LOW &&
+ mode->mode != DRM_MM_INSERT_HIGH)
+ continue;
+
+ if (prepare_frag(test, &mm, nodes, insert_size, mode))
+ goto err;
+
+ insert_time1 = get_insert_time(test, &mm, insert_size,
+ nodes + insert_size, mode);
+ if (insert_time1 == 0)
+ goto err;
+
+ insert_time2 = get_insert_time(test, &mm, (insert_size * 2),
+ nodes + insert_size * 2, mode);
+ if (insert_time2 == 0)
+ goto err;
+
+ kunit_info(test, "%s fragmented insert of %u and %u insertions took %llu and %llu nsecs\n",
+ mode->name, insert_size, insert_size * 2, insert_time1, insert_time2);
+
+ if (insert_time2 > (scale_factor * insert_time1)) {
+ KUNIT_FAIL(test, "%s fragmented insert took %llu nsecs more\n",
+ mode->name, insert_time2 - (scale_factor * insert_time1));
+ goto err;
+ }
+
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ }
+
+err:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ vfree(nodes);
+}
+
+static void drm_test_mm_align(struct kunit *test)
+{
+ const struct insert_mode *mode;
+ const unsigned int max_count = min(8192u, max_prime);
+ struct drm_mm mm;
+ struct drm_mm_node *nodes, *node, *next;
+ unsigned int prime;
+
+ /* For each of the possible insertion modes, we pick a few
+ * arbitrary alignments and check that the inserted node
+ * meets our requirements.
+ */
+
+ nodes = vzalloc(array_size(max_count, sizeof(*nodes)));
+ KUNIT_ASSERT_TRUE(test, nodes);
+
+ drm_mm_init(&mm, 1, U64_MAX - 2);
+
+ for (mode = insert_modes; mode->name; mode++) {
+ unsigned int i = 0;
+
+ for_each_prime_number_from(prime, 1, max_count) {
+ u64 size = next_prime_number(prime);
+
+ if (!expect_insert(test, &mm, &nodes[i], size, prime, i, mode)) {
+ KUNIT_FAIL(test, "%s insert failed with alignment=%d",
+ mode->name, prime);
+ goto out;
+ }
+
+ i++;
+ }
+
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ DRM_MM_BUG_ON(!drm_mm_clean(&mm));
+
+ cond_resched();
+ }
+
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ vfree(nodes);
+}
+
+static void drm_test_mm_align_pot(struct kunit *test, int max)
+{
+ struct drm_mm mm;
+ struct drm_mm_node *node, *next;
+ int bit;
+
+ /* Check that we can align to the full u64 address space */
+
+ drm_mm_init(&mm, 1, U64_MAX - 2);
+
+ for (bit = max - 1; bit; bit--) {
+ u64 align, size;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node) {
+ KUNIT_FAIL(test, "failed to allocate node");
+ goto out;
+ }
+
+ align = BIT_ULL(bit);
+ size = BIT_ULL(bit - 1) + 1;
+ if (!expect_insert(test, &mm, node, size, align, bit, &insert_modes[0])) {
+ KUNIT_FAIL(test, "insert failed with alignment=%llx [%d]", align, bit);
+ goto out;
+ }
+
+ cond_resched();
+ }
+
+out:
+ drm_mm_for_each_node_safe(node, next, &mm) {
+ drm_mm_remove_node(node);
+ kfree(node);
+ }
+ drm_mm_takedown(&mm);
+}
+
+static void drm_test_mm_align32(struct kunit *test)
+{
+ drm_test_mm_align_pot(test, 32);
+}
+
+static void drm_test_mm_align64(struct kunit *test)
+{
+ drm_test_mm_align_pot(test, 64);
+}
+
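+/* Debug helpers: dump the state of a failed eviction scan and the first few
+ * holes of the drm_mm.
+ */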
+static void show_scan(struct kunit *test, const struct drm_mm_scan *scan)
+{
+ kunit_info(test, "scan: hit [%llx, %llx], size=%lld, align=%lld, color=%ld\n",
+ scan->hit_start, scan->hit_end, scan->size, scan->alignment, scan->color);
+}
+
+static void show_holes(struct kunit *test, const struct drm_mm *mm, int count)
+{
+ u64 hole_start, hole_end;
+ struct drm_mm_node *hole;
+
+ drm_mm_for_each_hole(hole, mm, hole_start, hole_end) {
+ struct drm_mm_node *next = list_next_entry(hole, node_list);
+ const char *node1 = NULL, *node2 = NULL;
+
+ if (drm_mm_node_allocated(hole))
+ node1 = kasprintf(GFP_KERNEL, "[%llx + %lld, color=%ld], ",
+ hole->start, hole->size, hole->color);
+
+ if (drm_mm_node_allocated(next))
+ node2 = kasprintf(GFP_KERNEL, ", [%llx + %lld, color=%ld]",
+ next->start, next->size, next->color);
+
+ kunit_info(test, "%sHole [%llx - %llx, size %lld]%s\n", node1,
+ hole_start, hole_end, hole_end - hole_start, node2);
+
+ kfree(node2);
+ kfree(node1);
+
+ if (!--count)
+ break;
+ }
+}
+
+struct evict_node {
+ struct drm_mm_node node;
+ struct list_head link;
+};
+
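+/* Feed nodes to the eviction scanner until it reports a fit, trim the list
+ * to the blocks actually selected, then remove those nodes from the drm_mm
+ * (plus any colour-adjacent blocks when use_color is set).
+ */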
+static bool evict_nodes(struct kunit *test, struct drm_mm_scan *scan,
+ struct evict_node *nodes, unsigned int *order, unsigned int count,
+ bool use_color, struct list_head *evict_list)
+{
+ struct evict_node *e, *en;
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ e = &nodes[order ? order[i] : i];
+ list_add(&e->link, evict_list);
+ if (drm_mm_scan_add_block(scan, &e->node))
+ break;
+ }
+ list_for_each_entry_safe(e, en, evict_list, link) {
+ if (!drm_mm_scan_remove_block(scan, &e->node))
+ list_del(&e->link);
+ }
+ if (list_empty(evict_list)) {
+ KUNIT_FAIL(test,
+ "Failed to find eviction: size=%lld [avail=%d], align=%lld (color=%lu)\n",
+ scan->size, count, scan->alignment, scan->color);
+ return false;
+ }
+
+ list_for_each_entry(e, evict_list, link)
+ drm_mm_remove_node(&e->node);
+
+ if (use_color) {
+ struct drm_mm_node *node;
+
+ while ((node = drm_mm_scan_color_evict(scan))) {
+ e = container_of(node, typeof(*e), node);
+ drm_mm_remove_node(&e->node);
+ list_add(&e->link, evict_list);
+ }
+ } else {
+ if (drm_mm_scan_color_evict(scan)) {
+ KUNIT_FAIL(test,
+ "drm_mm_scan_color_evict unexpectedly reported overlapping nodes!\n");
+ return false;
+ }
+ }
+
+ return true;
+}
+
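+/* Run a full eviction scan but back out of every block, then check that no
+ * node was lost or unlinked in the process.
+ */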
+static bool evict_nothing(struct kunit *test, struct drm_mm *mm,
+ unsigned int total_size, struct evict_node *nodes)
+{
+ struct drm_mm_scan scan;
+ LIST_HEAD(evict_list);
+ struct evict_node *e;
+ struct drm_mm_node *node;
+ unsigned int n;
+
+ drm_mm_scan_init(&scan, mm, 1, 0, 0, 0);
+ for (n = 0; n < total_size; n++) {
+ e = &nodes[n];
+ list_add(&e->link, &evict_list);
+ drm_mm_scan_add_block(&scan, &e->node);
+ }
+ list_for_each_entry(e, &evict_list, link)
+ drm_mm_scan_remove_block(&scan, &e->node);
+
+ for (n = 0; n < total_size; n++) {
+ e = &nodes[n];
+
+ if (!drm_mm_node_allocated(&e->node)) {
+ KUNIT_FAIL(test, "node[%d] no longer allocated!\n", n);
+ return false;
+ }
+
+ e->link.next = NULL;
+ }
+
+ drm_mm_for_each_node(node, mm) {
+ e = container_of(node, typeof(*e), node);
+ e->link.next = &e->link;
+ }
+
+ for (n = 0; n < total_size; n++) {
+ e = &nodes[n];
+
+ if (!e->link.next) {
+ KUNIT_FAIL(test, "node[%d] no longer connected!\n", n);
+ return false;
+ }
+ }
+
+ return assert_continuous(test, mm, nodes[0].node.size);
+}
+
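+/* Scan and evict every node, check that the drm_mm collapses back into a
+ * single hole, then reinsert everything at its old place.
+ */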
+static bool evict_everything(struct kunit *test, struct drm_mm *mm,
+ unsigned int total_size, struct evict_node *nodes)
+{
+ struct drm_mm_scan scan;
+ LIST_HEAD(evict_list);
+ struct evict_node *e;
+ unsigned int n;
+ int err;
+
+ drm_mm_scan_init(&scan, mm, total_size, 0, 0, 0);
+ for (n = 0; n < total_size; n++) {
+ e = &nodes[n];
+ list_add(&e->link, &evict_list);
+ if (drm_mm_scan_add_block(&scan, &e->node))
+ break;
+ }
+
+ err = 0;
+ list_for_each_entry(e, &evict_list, link) {
+ if (!drm_mm_scan_remove_block(&scan, &e->node)) {
+ if (!err) {
+ KUNIT_FAIL(test, "Node %lld not marked for eviction!\n",
+ e->node.start);
+ err = -EINVAL;
+ }
+ }
+ }
+ if (err)
+ return false;
+
+ list_for_each_entry(e, &evict_list, link)
+ drm_mm_remove_node(&e->node);
+
+ if (!assert_one_hole(test, mm, 0, total_size))
+ return false;
+
+ list_for_each_entry(e, &evict_list, link) {
+ err = drm_mm_reserve_node(mm, &e->node);
+ if (err) {
+ KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
+ e->node.start);
+ return false;
+ }
+ }
+
+ return assert_continuous(test, mm, nodes[0].node.size);
+}
+
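+/* Evict just enough nodes to satisfy one insertion of the given size and
+ * alignment within [range_start, range_end), verify that the new node fills
+ * the eviction hole exactly, then restore the original layout.
+ */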
+static int evict_something(struct kunit *test, struct drm_mm *mm,
+ u64 range_start, u64 range_end, struct evict_node *nodes,
+ unsigned int *order, unsigned int count, unsigned int size,
+ unsigned int alignment, const struct insert_mode *mode)
+{
+ struct drm_mm_scan scan;
+ LIST_HEAD(evict_list);
+ struct evict_node *e;
+ struct drm_mm_node tmp;
+ int err;
+
+ drm_mm_scan_init_with_range(&scan, mm, size, alignment, 0, range_start,
+ range_end, mode->mode);
+ if (!evict_nodes(test, &scan, nodes, order, count, false, &evict_list))
+ return -EINVAL;
+
+ memset(&tmp, 0, sizeof(tmp));
+ err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, 0,
+ DRM_MM_INSERT_EVICT);
+ if (err) {
+ KUNIT_FAIL(test, "Failed to insert into eviction hole: size=%d, align=%d\n",
+ size, alignment);
+ show_scan(test, &scan);
+ show_holes(test, mm, 3);
+ return err;
+ }
+
+ if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
+ KUNIT_FAIL(test,
+ "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
+ tmp.start, tmp.size, range_start, range_end);
+ err = -EINVAL;
+ }
+
+ if (!assert_node(test, &tmp, mm, size, alignment, 0) ||
+ drm_mm_hole_follows(&tmp)) {
+ KUNIT_FAIL(test,
+ "Inserted did not fill the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx, hole-follows?=%d\n",
+ tmp.size, size, alignment, misalignment(&tmp, alignment),
+ tmp.start, drm_mm_hole_follows(&tmp));
+ err = -EINVAL;
+ }
+
+ drm_mm_remove_node(&tmp);
+ if (err)
+ return err;
+
+ list_for_each_entry(e, &evict_list, link) {
+ err = drm_mm_reserve_node(mm, &e->node);
+ if (err) {
+ KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
+ e->node.start);
+ return err;
+ }
+ }
+
+ if (!assert_continuous(test, mm, nodes[0].node.size)) {
+ KUNIT_FAIL(test, "range is no longer continuous\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void drm_test_mm_evict(struct kunit *test)
+{
+ DRM_RND_STATE(prng, random_seed);
+ const unsigned int size = 8192;
+ const struct insert_mode *mode;
+ struct drm_mm mm;
+ struct evict_node *nodes;
+ struct drm_mm_node *node, *next;
+ unsigned int *order, n;
+
+ /* Here we populate a full drm_mm and then try and insert a new node
+ * by evicting other nodes in a random order. The drm_mm_scan should
+ * pick the first matching hole it finds from the random list. We
+ * repeat that for different allocation strategies, alignments and
+ * sizes to try and stress the hole finder.
+ */
+
+ nodes = vzalloc(array_size(size, sizeof(*nodes)));
+ KUNIT_ASSERT_TRUE(test, nodes);
+
+ order = drm_random_order(size, &prng);
+ if (!order)
+ goto err_nodes;
+
+ drm_mm_init(&mm, 0, size);
+ for (n = 0; n < size; n++) {
+ if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) {
+ KUNIT_FAIL(test, "insert failed, step %d\n", n);
+ goto out;
+ }
+ }
+
+ /* First check that using the scanner doesn't break the mm */
+ if (!evict_nothing(test, &mm, size, nodes)) {
+ KUNIT_FAIL(test, "evict_nothing() failed\n");
+ goto out;
+ }
+ if (!evict_everything(test, &mm, size, nodes)) {
+ KUNIT_FAIL(test, "evict_everything() failed\n");
+ goto out;
+ }
+
+ for (mode = evict_modes; mode->name; mode++) {
+ for (n = 1; n <= size; n <<= 1) {
+ drm_random_reorder(order, size, &prng);
+ if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size, n, 1,
+ mode)) {
+ KUNIT_FAIL(test, "%s evict_something(size=%u) failed\n",
+ mode->name, n);
+ goto out;
+ }
+ }
+
+ for (n = 1; n < size; n <<= 1) {
+ drm_random_reorder(order, size, &prng);
+ if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size,
+ size / 2, n, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_something(size=%u, alignment=%u) failed\n",
+ mode->name, size / 2, n);
+ goto out;
+ }
+ }
+
+ for_each_prime_number_from(n, 1, min(size, max_prime)) {
+ unsigned int nsize = (size - n + 1) / 2;
+
+ DRM_MM_BUG_ON(!nsize);
+
+ drm_random_reorder(order, size, &prng);
+ if (evict_something(test, &mm, 0, U64_MAX, nodes, order, size,
+ nsize, n, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_something(size=%u, alignment=%u) failed\n",
+ mode->name, nsize, n);
+ goto out;
+ }
+ }
+
+ cond_resched();
+ }
+
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ kfree(order);
+err_nodes:
+ vfree(nodes);
+}
+
+static void drm_test_mm_evict_range(struct kunit *test)
+{
+ DRM_RND_STATE(prng, random_seed);
+ const unsigned int size = 8192;
+ const unsigned int range_size = size / 2;
+ const unsigned int range_start = size / 4;
+ const unsigned int range_end = range_start + range_size;
+ const struct insert_mode *mode;
+ struct drm_mm mm;
+ struct evict_node *nodes;
+ struct drm_mm_node *node, *next;
+ unsigned int *order, n;
+
+ /* Like drm_test_mm_evict() but now we are limiting the search to a
+ * small portion of the full drm_mm.
+ */
+
+ nodes = vzalloc(array_size(size, sizeof(*nodes)));
+ KUNIT_ASSERT_TRUE(test, nodes);
+
+ order = drm_random_order(size, &prng);
+ if (!order)
+ goto err_nodes;
+
+ drm_mm_init(&mm, 0, size);
+ for (n = 0; n < size; n++) {
+ if (drm_mm_insert_node(&mm, &nodes[n].node, 1)) {
+ KUNIT_FAIL(test, "insert failed, step %d\n", n);
+ goto out;
+ }
+ }
+
+ for (mode = evict_modes; mode->name; mode++) {
+ for (n = 1; n <= range_size; n <<= 1) {
+ drm_random_reorder(order, size, &prng);
+ if (evict_something(test, &mm, range_start, range_end, nodes,
+ order, size, n, 1, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_something(size=%u) failed with range [%u, %u]\n",
+ mode->name, n, range_start, range_end);
+ goto out;
+ }
+ }
+
+ for (n = 1; n <= range_size; n <<= 1) {
+ drm_random_reorder(order, size, &prng);
+ if (evict_something(test, &mm, range_start, range_end, nodes,
+ order, size, range_size / 2, n, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
+ mode->name, range_size / 2, n, range_start, range_end);
+ goto out;
+ }
+ }
+
+ for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
+ unsigned int nsize = (range_size - n + 1) / 2;
+
+ DRM_MM_BUG_ON(!nsize);
+
+ drm_random_reorder(order, size, &prng);
+ if (evict_something(test, &mm, range_start, range_end, nodes,
+ order, size, nsize, n, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_something(size=%u, alignment=%u) failed with range [%u, %u]\n",
+ mode->name, nsize, n, range_start, range_end);
+ goto out;
+ }
+ }
+
+ cond_resched();
+ }
+
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ kfree(order);
+err_nodes:
+ vfree(nodes);
+}
+
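+ /* The topdown/bottomup tests fill the mm with equally sized nodes, so a
+ * node's slot index is simply its start offset divided by that common size.
+ */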
+static unsigned int node_index(const struct drm_mm_node *node)
+{
+ return div64_u64(node->start, node->size);
+}
+
+static void drm_test_mm_topdown(struct kunit *test)
+{
+ const struct insert_mode *topdown = &insert_modes[TOPDOWN];
+
+ DRM_RND_STATE(prng, random_seed);
+ const unsigned int count = 8192;
+ unsigned int size;
+ unsigned long *bitmap;
+ struct drm_mm mm;
+ struct drm_mm_node *nodes, *node, *next;
+ unsigned int *order, n, m, o = 0;
+
+ /* When allocating top-down, we expect to be returned a node
+ * from a suitable hole at the top of the drm_mm. We check that
+ * the returned node does match the highest available slot.
+ */
+
+ nodes = vzalloc(array_size(count, sizeof(*nodes)));
+ KUNIT_ASSERT_TRUE(test, nodes);
+
+ bitmap = bitmap_zalloc(count, GFP_KERNEL);
+ if (!bitmap)
+ goto err_nodes;
+
+ order = drm_random_order(count, &prng);
+ if (!order)
+ goto err_bitmap;
+
+ for (size = 1; size <= 64; size <<= 1) {
+ drm_mm_init(&mm, 0, size * count);
+ for (n = 0; n < count; n++) {
+ if (!expect_insert(test, &mm, &nodes[n], size, 0, n, topdown)) {
+ KUNIT_FAIL(test, "insert failed, size %u step %d\n", size, n);
+ goto out;
+ }
+
+ if (drm_mm_hole_follows(&nodes[n])) {
+ KUNIT_FAIL(test,
+ "hole after topdown insert %d, start=%llx\n, size=%u",
+ n, nodes[n].start, size);
+ goto out;
+ }
+
+ if (!assert_one_hole(test, &mm, 0, size * (count - n - 1)))
+ goto out;
+ }
+
+ if (!assert_continuous(test, &mm, size))
+ goto out;
+
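+ /* Remove a randomly chosen batch of nodes, marking each freed slot in the
+ * bitmap, then re-insert them top-down and check that every insertion
+ * lands in the highest slot still marked free.
+ */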
+ drm_random_reorder(order, count, &prng);
+ for_each_prime_number_from(n, 1, min(count, max_prime)) {
+ for (m = 0; m < n; m++) {
+ node = &nodes[order[(o + m) % count]];
+ drm_mm_remove_node(node);
+ __set_bit(node_index(node), bitmap);
+ }
+
+ for (m = 0; m < n; m++) {
+ unsigned int last;
+
+ node = &nodes[order[(o + m) % count]];
+ if (!expect_insert(test, &mm, node, size, 0, 0, topdown)) {
+ KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n);
+ goto out;
+ }
+
+ if (drm_mm_hole_follows(node)) {
+ KUNIT_FAIL(test,
+ "hole after topdown insert %d/%d, start=%llx\n",
+ m, n, node->start);
+ goto out;
+ }
+
+ last = find_last_bit(bitmap, count);
+ if (node_index(node) != last) {
+ KUNIT_FAIL(test,
+ "node %d/%d, size %d, not inserted into upmost hole, expected %d, found %d\n",
+ m, n, size, last, node_index(node));
+ goto out;
+ }
+
+ __clear_bit(last, bitmap);
+ }
+
+ DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
+
+ o += n;
+ }
+
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ DRM_MM_BUG_ON(!drm_mm_clean(&mm));
+ cond_resched();
+ }
+
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ kfree(order);
+err_bitmap:
+ bitmap_free(bitmap);
+err_nodes:
+ vfree(nodes);
+}
+
+static void drm_test_mm_bottomup(struct kunit *test)
+{
+ const struct insert_mode *bottomup = &insert_modes[BOTTOMUP];
+
+ DRM_RND_STATE(prng, random_seed);
+ const unsigned int count = 8192;
+ unsigned int size;
+ unsigned long *bitmap;
+ struct drm_mm mm;
+ struct drm_mm_node *nodes, *node, *next;
+ unsigned int *order, n, m, o = 0;
+
+ /* Like drm_test_mm_topdown, but instead of searching for the last hole,
+ * we search for the first.
+ */
+
+ nodes = vzalloc(array_size(count, sizeof(*nodes)));
+ KUNIT_ASSERT_TRUE(test, nodes);
+
+ bitmap = bitmap_zalloc(count, GFP_KERNEL);
+ if (!bitmap)
+ goto err_nodes;
+
+ order = drm_random_order(count, &prng);
+ if (!order)
+ goto err_bitmap;
+
+ for (size = 1; size <= 64; size <<= 1) {
+ drm_mm_init(&mm, 0, size * count);
+ for (n = 0; n < count; n++) {
+ if (!expect_insert(test, &mm, &nodes[n], size, 0, n, bottomup)) {
+ KUNIT_FAIL(test,
+ "bottomup insert failed, size %u step %d\n", size, n);
+ goto out;
+ }
+
+ if (!assert_one_hole(test, &mm, size * (n + 1), size * count))
+ goto out;
+ }
+
+ if (!assert_continuous(test, &mm, size))
+ goto out;
+
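+ /* As in the topdown test: free a random batch of nodes, then re-insert
+ * them bottom-up and check that each one lands in the lowest slot still
+ * marked free in the bitmap.
+ */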
+ drm_random_reorder(order, count, &prng);
+ for_each_prime_number_from(n, 1, min(count, max_prime)) {
+ for (m = 0; m < n; m++) {
+ node = &nodes[order[(o + m) % count]];
+ drm_mm_remove_node(node);
+ __set_bit(node_index(node), bitmap);
+ }
+
+ for (m = 0; m < n; m++) {
+ unsigned int first;
+
+ node = &nodes[order[(o + m) % count]];
+ if (!expect_insert(test, &mm, node, size, 0, 0, bottomup)) {
+ KUNIT_FAIL(test, "insert failed, step %d/%d\n", m, n);
+ goto out;
+ }
+
+ first = find_first_bit(bitmap, count);
+ if (node_index(node) != first) {
+ KUNIT_FAIL(test,
+ "node %d/%d not inserted into bottom hole, expected %d, found %d\n",
+ m, n, first, node_index(node));
+ goto out;
+ }
+ __clear_bit(first, bitmap);
+ }
+
+ DRM_MM_BUG_ON(find_first_bit(bitmap, count) != count);
+
+ o += n;
+ }
+
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ DRM_MM_BUG_ON(!drm_mm_clean(&mm));
+ cond_resched();
+ }
+
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ kfree(order);
+err_bitmap:
+ bitmap_free(bitmap);
+err_nodes:
+ vfree(nodes);
+}
+
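+ /* Helper shared by the lowest/highest tests: build a tiny mm spanning [0, 7)
+ * with single-unit nodes reserved at offsets 1 and 5, leaving holes of size
+ * 1, 3 and 1, and check that a size-2 node can still be inserted with the
+ * requested placement mode despite the too-small holes at either end.
+ */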
+static void drm_test_mm_once(struct kunit *test, unsigned int mode)
+{
+ struct drm_mm mm;
+ struct drm_mm_node rsvd_lo, rsvd_hi, node;
+
+ drm_mm_init(&mm, 0, 7);
+
+ memset(&rsvd_lo, 0, sizeof(rsvd_lo));
+ rsvd_lo.start = 1;
+ rsvd_lo.size = 1;
+ if (drm_mm_reserve_node(&mm, &rsvd_lo)) {
+ KUNIT_FAIL(test, "Could not reserve low node\n");
+ goto err;
+ }
+
+ memset(&rsvd_hi, 0, sizeof(rsvd_hi));
+ rsvd_hi.start = 5;
+ rsvd_hi.size = 1;
+ if (drm_mm_reserve_node(&mm, &rsvd_hi)) {
+ KUNIT_FAIL(test, "Could not reserve low node\n");
+ goto err_lo;
+ }
+
+ if (!drm_mm_hole_follows(&rsvd_lo) || !drm_mm_hole_follows(&rsvd_hi)) {
+ KUNIT_FAIL(test, "Expected a hole after lo and high nodes!\n");
+ goto err_hi;
+ }
+
+ memset(&node, 0, sizeof(node));
+ if (drm_mm_insert_node_generic(&mm, &node, 2, 0, 0, mode)) {
+ KUNIT_FAIL(test, "Could not insert the node into the available hole!\n");
+ goto err_hi;
+ }
+
+ drm_mm_remove_node(&node);
+err_hi:
+ drm_mm_remove_node(&rsvd_hi);
+err_lo:
+ drm_mm_remove_node(&rsvd_lo);
+err:
+ drm_mm_takedown(&mm);
+}
+
+static void drm_test_mm_lowest(struct kunit *test)
+{
+ drm_test_mm_once(test, DRM_MM_INSERT_LOW);
+}
+
+static void drm_test_mm_highest(struct kunit *test)
+{
+ drm_test_mm_once(test, DRM_MM_INSERT_HIGH);
+}
+
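+ /* color_adjust callback used by the color tests: if the node on either side
+ * of a hole has a different color, shrink the usable range by one unit on
+ * that side so that differently colored neighbours never touch.
+ */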
+static void separate_adjacent_colors(const struct drm_mm_node *node,
+ unsigned long color, u64 *start, u64 *end)
+{
+ if (drm_mm_node_allocated(node) && node->color != color)
+ ++*start;
+
+ node = list_next_entry(node, node_list);
+ if (drm_mm_node_allocated(node) && node->color != color)
+ --*end;
+}
+
+static bool colors_abutt(struct kunit *test, const struct drm_mm_node *node)
+{
+ if (!drm_mm_hole_follows(node) &&
+ drm_mm_node_allocated(list_next_entry(node, node_list))) {
+ KUNIT_FAIL(test, "colors abutt; %ld [%llx + %llx] is next to %ld [%llx + %llx]!\n",
+ node->color, node->start, node->size,
+ list_next_entry(node, node_list)->color,
+ list_next_entry(node, node_list)->start,
+ list_next_entry(node, node_list)->size);
+ return true;
+ }
+
+ return false;
+}
+
+static void drm_test_mm_color(struct kunit *test)
+{
+ const unsigned int count = min(4096u, max_iterations);
+ const struct insert_mode *mode;
+ struct drm_mm mm;
+ struct drm_mm_node *node, *nn;
+ unsigned int n;
+
+ /* Color adjustment complicates everything. First we just check
+ * that when we insert a node we apply any color_adjustment callback.
+ * The callback we use should ensure that there is a gap between
+ * any two nodes, and so after each insertion we check that those
+ * holes are inserted and that they are preserved.
+ */
+
+ drm_mm_init(&mm, 0, U64_MAX);
+
+ for (n = 1; n <= count; n++) {
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ goto out;
+
+ if (!expect_insert(test, &mm, node, n, 0, n, &insert_modes[0])) {
+ KUNIT_FAIL(test, "insert failed, step %d\n", n);
+ kfree(node);
+ goto out;
+ }
+ }
+
+ drm_mm_for_each_node_safe(node, nn, &mm) {
+ if (node->color != node->size) {
+ KUNIT_FAIL(test, "invalid color stored: expected %lld, found %ld\n",
+ node->size, node->color);
+
+ goto out;
+ }
+
+ drm_mm_remove_node(node);
+ kfree(node);
+ }
+
+ /* Now, let's start experimenting with applying a color callback */
+ mm.color_adjust = separate_adjacent_colors;
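+ /* For each insert mode: reserve an initial node, then repeatedly try to
+ * reserve the next node flush against the previous one, which must fail
+ * with -ENOSPC because of the required color gap, before nudging it past
+ * the gap (and fixing its alignment) and reserving it for real. Afterwards
+ * insert more nodes through the allocator and verify color, gaps and
+ * alignment are all preserved.
+ */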
+ for (mode = insert_modes; mode->name; mode++) {
+ u64 last;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ goto out;
+
+ node->size = 1 + 2 * count;
+ node->color = node->size;
+
+ if (drm_mm_reserve_node(&mm, node)) {
+ KUNIT_FAIL(test, "initial reserve failed!\n");
+ goto out;
+ }
+
+ last = node->start + node->size;
+
+ for (n = 1; n <= count; n++) {
+ int rem;
+
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ goto out;
+
+ node->start = last;
+ node->size = n + count;
+ node->color = node->size;
+
+ if (drm_mm_reserve_node(&mm, node) != -ENOSPC) {
+ KUNIT_FAIL(test, "reserve %d did not report color overlap!", n);
+ goto out;
+ }
+
+ node->start += n + 1;
+ rem = misalignment(node, n + count);
+ node->start += n + count - rem;
+
+ if (drm_mm_reserve_node(&mm, node)) {
+ KUNIT_FAIL(test, "reserve %d failed", n);
+ goto out;
+ }
+
+ last = node->start + node->size;
+ }
+
+ for (n = 1; n <= count; n++) {
+ node = kzalloc(sizeof(*node), GFP_KERNEL);
+ if (!node)
+ goto out;
+
+ if (!expect_insert(test, &mm, node, n, n, n, mode)) {
+ KUNIT_FAIL(test, "%s insert failed, step %d\n", mode->name, n);
+ kfree(node);
+ goto out;
+ }
+ }
+
+ drm_mm_for_each_node_safe(node, nn, &mm) {
+ u64 rem;
+
+ if (node->color != node->size) {
+ KUNIT_FAIL(test,
+ "%s invalid color stored: expected %lld, found %ld\n",
+ mode->name, node->size, node->color);
+
+ goto out;
+ }
+
+ if (colors_abutt(test, node))
+ goto out;
+
+ div64_u64_rem(node->start, node->size, &rem);
+ if (rem) {
+ KUNIT_FAIL(test,
+ "%s colored node misaligned, start=%llx expected alignment=%lld [rem=%lld]\n",
+ mode->name, node->start, node->size, rem);
+ goto out;
+ }
+
+ drm_mm_remove_node(node);
+ kfree(node);
+ }
+
+ cond_resched();
+ }
+
+out:
+ drm_mm_for_each_node_safe(node, nn, &mm) {
+ drm_mm_remove_node(node);
+ kfree(node);
+ }
+ drm_mm_takedown(&mm);
+}
+
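+ /* Scan-evict enough nodes to make room for an allocation of the given size,
+ * alignment and color, insert into the resulting hole, verify its placement
+ * (range, color gap and alignment), then reinsert the evicted nodes.
+ */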
+static int evict_color(struct kunit *test, struct drm_mm *mm, u64 range_start,
+ u64 range_end, struct evict_node *nodes, unsigned int *order,
+ unsigned int count, unsigned int size, unsigned int alignment,
+ unsigned long color, const struct insert_mode *mode)
+{
+ struct drm_mm_scan scan;
+ LIST_HEAD(evict_list);
+ struct evict_node *e;
+ struct drm_mm_node tmp;
+ int err;
+
+ drm_mm_scan_init_with_range(&scan, mm, size, alignment, color, range_start,
+ range_end, mode->mode);
+ if (!evict_nodes(test, &scan, nodes, order, count, true, &evict_list))
+ return -EINVAL;
+
+ memset(&tmp, 0, sizeof(tmp));
+ err = drm_mm_insert_node_generic(mm, &tmp, size, alignment, color,
+ DRM_MM_INSERT_EVICT);
+ if (err) {
+ KUNIT_FAIL(test,
+ "Failed to insert into eviction hole: size=%d, align=%d, color=%lu, err=%d\n",
+ size, alignment, color, err);
+ show_scan(test, &scan);
+ show_holes(test, mm, 3);
+ return err;
+ }
+
+ if (tmp.start < range_start || tmp.start + tmp.size > range_end) {
+ KUNIT_FAIL(test,
+ "Inserted [address=%llu + %llu] did not fit into the request range [%llu, %llu]\n",
+ tmp.start, tmp.size, range_start, range_end);
+ err = -EINVAL;
+ }
+
+ if (colors_abutt(test, &tmp))
+ err = -EINVAL;
+
+ if (!assert_node(test, &tmp, mm, size, alignment, color)) {
+ KUNIT_FAIL(test,
+ "Inserted did not fit the eviction hole: size=%lld [%d], align=%d [rem=%lld], start=%llx\n",
+ tmp.size, size, alignment, misalignment(&tmp, alignment), tmp.start);
+ err = -EINVAL;
+ }
+
+ drm_mm_remove_node(&tmp);
+ if (err)
+ return err;
+
+ list_for_each_entry(e, &evict_list, link) {
+ err = drm_mm_reserve_node(mm, &e->node);
+ if (err) {
+ KUNIT_FAIL(test, "Failed to reinsert node after eviction: start=%llx\n",
+ e->node.start);
+ return err;
+ }
+ }
+
+ cond_resched();
+ return 0;
+}
+
+static void drm_test_mm_color_evict(struct kunit *test)
+{
+ DRM_RND_STATE(prng, random_seed);
+ const unsigned int total_size = min(8192u, max_iterations);
+ const struct insert_mode *mode;
+ unsigned long color = 0;
+ struct drm_mm mm;
+ struct evict_node *nodes;
+ struct drm_mm_node *node, *next;
+ unsigned int *order, n;
+
+ /* Check that the drm_mm_scan also honours color adjustment when
+ * choosing its victims to create a hole. Our color_adjust does not
+ * allow two nodes to be placed together without an intervening hole,
+ * which enlarges the set of victims that must be evicted.
+ */
+
+ nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
+ KUNIT_ASSERT_TRUE(test, nodes);
+
+ order = drm_random_order(total_size, &prng);
+ if (!order)
+ goto err_nodes;
+
+ drm_mm_init(&mm, 0, 2 * total_size - 1);
+ mm.color_adjust = separate_adjacent_colors;
+ for (n = 0; n < total_size; n++) {
+ if (!expect_insert(test, &mm, &nodes[n].node,
+ 1, 0, color++,
+ &insert_modes[0])) {
+ KUNIT_FAIL(test, "insert failed, step %d\n", n);
+ goto out;
+ }
+ }
+
+ for (mode = evict_modes; mode->name; mode++) {
+ for (n = 1; n <= total_size; n <<= 1) {
+ drm_random_reorder(order, total_size, &prng);
+ if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
+ n, 1, color++, mode)) {
+ KUNIT_FAIL(test, "%s evict_color(size=%u) failed\n", mode->name, n);
+ goto out;
+ }
+ }
+
+ for (n = 1; n < total_size; n <<= 1) {
+ drm_random_reorder(order, total_size, &prng);
+ if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
+ total_size / 2, n, color++, mode)) {
+ KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n",
+ mode->name, total_size / 2, n);
+ goto out;
+ }
+ }
+
+ for_each_prime_number_from(n, 1, min(total_size, max_prime)) {
+ unsigned int nsize = (total_size - n + 1) / 2;
+
+ DRM_MM_BUG_ON(!nsize);
+
+ drm_random_reorder(order, total_size, &prng);
+ if (evict_color(test, &mm, 0, U64_MAX, nodes, order, total_size,
+ nsize, n, color++, mode)) {
+ KUNIT_FAIL(test, "%s evict_color(size=%u, alignment=%u) failed\n",
+ mode->name, nsize, n);
+ goto out;
+ }
+ }
+
+ cond_resched();
+ }
+
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ kfree(order);
+err_nodes:
+ vfree(nodes);
+}
+
+static void drm_test_mm_color_evict_range(struct kunit *test)
+{
+ DRM_RND_STATE(prng, random_seed);
+ const unsigned int total_size = 8192;
+ const unsigned int range_size = total_size / 2;
+ const unsigned int range_start = total_size / 4;
+ const unsigned int range_end = range_start + range_size;
+ const struct insert_mode *mode;
+ unsigned long color = 0;
+ struct drm_mm mm;
+ struct evict_node *nodes;
+ struct drm_mm_node *node, *next;
+ unsigned int *order, n;
+
+ /* Like drm_test_mm_color_evict(), but limited to a small portion of the full
+ * drm_mm range.
+ */
+
+ nodes = vzalloc(array_size(total_size, sizeof(*nodes)));
+ KUNIT_ASSERT_TRUE(test, nodes);
+
+ order = drm_random_order(total_size, &prng);
+ if (!order)
+ goto err_nodes;
+
+ drm_mm_init(&mm, 0, 2 * total_size - 1);
+ mm.color_adjust = separate_adjacent_colors;
+ for (n = 0; n < total_size; n++) {
+ if (!expect_insert(test, &mm, &nodes[n].node,
+ 1, 0, color++,
+ &insert_modes[0])) {
+ KUNIT_FAIL(test, "insert failed, step %d\n", n);
+ goto out;
+ }
+ }
+
+ for (mode = evict_modes; mode->name; mode++) {
+ for (n = 1; n <= range_size; n <<= 1) {
+ drm_random_reorder(order, range_size, &prng);
+ if (evict_color(test, &mm, range_start, range_end, nodes, order,
+ total_size, n, 1, color++, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_color(size=%u) failed for range [%x, %x]\n",
+ mode->name, n, range_start, range_end);
+ goto out;
+ }
+ }
+
+ for (n = 1; n < range_size; n <<= 1) {
+ drm_random_reorder(order, total_size, &prng);
+ if (evict_color(test, &mm, range_start, range_end, nodes, order,
+ total_size, range_size / 2, n, color++, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
+ mode->name, range_size / 2, n, range_start, range_end);
+ goto out;
+ }
+ }
+
+ for_each_prime_number_from(n, 1, min(range_size, max_prime)) {
+ unsigned int nsize = (range_size - n + 1) / 2;
+
+ DRM_MM_BUG_ON(!nsize);
+
+ drm_random_reorder(order, total_size, &prng);
+ if (evict_color(test, &mm, range_start, range_end, nodes, order,
+ total_size, nsize, n, color++, mode)) {
+ KUNIT_FAIL(test,
+ "%s evict_color(size=%u, alignment=%u) failed for range [%x, %x]\n",
+ mode->name, nsize, n, range_start, range_end);
+ goto out;
+ }
+ }
+
+ cond_resched();
+ }
+
+out:
+ drm_mm_for_each_node_safe(node, next, &mm)
+ drm_mm_remove_node(node);
+ drm_mm_takedown(&mm);
+ kfree(order);
+err_nodes:
+ vfree(nodes);
+}
+
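+ /* Suite init: pick a non-zero random seed unless one was supplied via the
+ * random_seed module parameter, which lets a failing run be reproduced by
+ * passing the same seed back in.
+ */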
+static int drm_mm_init_test(struct kunit *test)
+{
+ while (!random_seed)
+ random_seed = get_random_u32();
+
+ return 0;
+}
+
+module_param(random_seed, uint, 0400);
+module_param(max_iterations, uint, 0400);
+module_param(max_prime, uint, 0400);
+
+static struct kunit_case drm_mm_tests[] = {
+ KUNIT_CASE(drm_test_mm_init),
+ KUNIT_CASE(drm_test_mm_debug),
+ KUNIT_CASE(drm_test_mm_reserve),
+ KUNIT_CASE(drm_test_mm_insert),
+ KUNIT_CASE(drm_test_mm_replace),
+ KUNIT_CASE(drm_test_mm_insert_range),
+ KUNIT_CASE(drm_test_mm_frag),
+ KUNIT_CASE(drm_test_mm_align),
+ KUNIT_CASE(drm_test_mm_align32),
+ KUNIT_CASE(drm_test_mm_align64),
+ KUNIT_CASE(drm_test_mm_evict),
+ KUNIT_CASE(drm_test_mm_evict_range),
+ KUNIT_CASE(drm_test_mm_topdown),
+ KUNIT_CASE(drm_test_mm_bottomup),
+ KUNIT_CASE(drm_test_mm_lowest),
+ KUNIT_CASE(drm_test_mm_highest),
+ KUNIT_CASE(drm_test_mm_color),
+ KUNIT_CASE(drm_test_mm_color_evict),
+ KUNIT_CASE(drm_test_mm_color_evict_range),
+ {}
+};
+
+static struct kunit_suite drm_mm_test_suite = {
+ .name = "drm_mm",
+ .init = drm_mm_init_test,
+ .test_cases = drm_mm_tests,
+};
+
+kunit_test_suite(drm_mm_test_suite);
+
+MODULE_AUTHOR("Intel Corporation");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_plane_helper_test.c b/drivers/gpu/drm/tests/drm_plane_helper_test.c
new file mode 100644
index 000000000..ec71af791
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_plane_helper_test.c
@@ -0,0 +1,237 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_plane_helper functions
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_framebuffer.h>
+#include <drm/drm_modes.h>
+
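+/*
+ * Plane source coordinates (src_x/y/w/h and the clipped src rectangle) are in
+ * 16.16 fixed point, so "1024 << 16" below means 1024 whole pixels; the
+ * helpers here fill in and compare those raw fixed-point values.
+ */
+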
+static void set_src(struct drm_plane_state *plane_state,
+ unsigned int src_x, unsigned int src_y,
+ unsigned int src_w, unsigned int src_h)
+{
+ plane_state->src_x = src_x;
+ plane_state->src_y = src_y;
+ plane_state->src_w = src_w;
+ plane_state->src_h = src_h;
+}
+
+static bool check_src_eq(struct drm_plane_state *plane_state,
+ unsigned int src_x, unsigned int src_y,
+ unsigned int src_w, unsigned int src_h)
+{
+ if (plane_state->src.x1 < 0) {
+ pr_err("src x coordinate %x should never be below 0.\n", plane_state->src.x1);
+ drm_rect_debug_print("src: ", &plane_state->src, true);
+ return false;
+ }
+ if (plane_state->src.y1 < 0) {
+ pr_err("src y coordinate %x should never be below 0.\n", plane_state->src.y1);
+ drm_rect_debug_print("src: ", &plane_state->src, true);
+ return false;
+ }
+
+ if (plane_state->src.x1 != src_x ||
+ plane_state->src.y1 != src_y ||
+ drm_rect_width(&plane_state->src) != src_w ||
+ drm_rect_height(&plane_state->src) != src_h) {
+ drm_rect_debug_print("src: ", &plane_state->src, true);
+ return false;
+ }
+
+ return true;
+}
+
+static void set_crtc(struct drm_plane_state *plane_state,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h)
+{
+ plane_state->crtc_x = crtc_x;
+ plane_state->crtc_y = crtc_y;
+ plane_state->crtc_w = crtc_w;
+ plane_state->crtc_h = crtc_h;
+}
+
+static bool check_crtc_eq(struct drm_plane_state *plane_state,
+ int crtc_x, int crtc_y,
+ unsigned int crtc_w, unsigned int crtc_h)
+{
+ if (plane_state->dst.x1 != crtc_x ||
+ plane_state->dst.y1 != crtc_y ||
+ drm_rect_width(&plane_state->dst) != crtc_w ||
+ drm_rect_height(&plane_state->dst) != crtc_h) {
+ drm_rect_debug_print("dst: ", &plane_state->dst, false);
+
+ return false;
+ }
+
+ return true;
+}
+
+static void drm_test_check_plane_state(struct kunit *test)
+{
+ int ret;
+
+ static const struct drm_crtc_state crtc_state = {
+ .crtc = ZERO_SIZE_PTR,
+ .enable = true,
+ .active = true,
+ .mode = {
+ DRM_MODE("1024x768", 0, 65000, 1024, 1048, 1184, 1344, 0, 768, 771,
+ 777, 806, 0, DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC)
+ },
+ };
+ static struct drm_plane plane = {
+ .dev = NULL
+ };
+ static struct drm_framebuffer fb = {
+ .width = 2048,
+ .height = 2048
+ };
+ static struct drm_plane_state plane_state = {
+ .plane = &plane,
+ .crtc = ZERO_SIZE_PTR,
+ .fb = &fb,
+ .rotation = DRM_MODE_ROTATE_0
+ };
+
+ /* Simple clipping, no scaling. */
+ set_src(&plane_state, 0, 0, fb.width << 16, fb.height << 16);
+ set_crtc(&plane_state, 0, 0, fb.width, fb.height);
+ ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Simple clipping check should pass\n");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 1024 << 16, 768 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+
+ /* Rotated clipping + reflection, no scaling. */
+ plane_state.rotation = DRM_MODE_ROTATE_90 | DRM_MODE_REFLECT_X;
+ ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Rotated clipping check should pass\n");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 768 << 16, 1024 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+ plane_state.rotation = DRM_MODE_ROTATE_0;
+
+ /* Check whether positioning works correctly. */
+ set_src(&plane_state, 0, 0, 1023 << 16, 767 << 16);
+ set_crtc(&plane_state, 0, 0, 1023, 767);
+ ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ KUNIT_EXPECT_TRUE_MSG(test, ret,
+ "Should not be able to position on the crtc with can_position=false\n");
+
+ ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+ DRM_PLANE_NO_SCALING,
+ DRM_PLANE_NO_SCALING,
+ true, false);
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Simple positioning should work\n");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 1023 << 16, 767 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1023, 767));
+
+ /* Simple scaling tests. */
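+ /*
+ * The min/max scale limits below are src/dst ratios in 16.16 fixed point:
+ * 0x8000 (0.5) permits at most 2x upscaling, 0x20000 (2.0) at most 2x
+ * downscaling, and DRM_PLANE_NO_SCALING (1.0) forbids any scaling.
+ */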
+ set_src(&plane_state, 0, 0, 512 << 16, 384 << 16);
+ set_crtc(&plane_state, 0, 0, 1024, 768);
+ ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+ 0x8001,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ KUNIT_EXPECT_TRUE_MSG(test, ret, "Upscaling out of range should fail.\n");
+ ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+ 0x8000,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Upscaling exactly 2x should work\n");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 512 << 16, 384 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+
+ set_src(&plane_state, 0, 0, 2048 << 16, 1536 << 16);
+ ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+ DRM_PLANE_NO_SCALING,
+ 0x1ffff, false, false);
+ KUNIT_EXPECT_TRUE_MSG(test, ret, "Downscaling out of range should fail.\n");
+ ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+ DRM_PLANE_NO_SCALING,
+ 0x20000, false, false);
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed with exact scaling limit\n");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 2048 << 16, 1536 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+
+ /* Testing rounding errors. */
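+ /*
+ * The source sizes here sit a fraction of a 16.16 unit away from whole
+ * pixels (e.g. 0x40001 is just over 4 pixels), so clipping has to snap the
+ * source to an exact pixel multiple without exceeding the scaling limits.
+ */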
+ set_src(&plane_state, 0, 0, 0x40001, 0x40001);
+ set_crtc(&plane_state, 1022, 766, 4, 4);
+ ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+ DRM_PLANE_NO_SCALING,
+ 0x10001,
+ true, false);
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 1022, 766, 2, 2));
+
+ set_src(&plane_state, 0x20001, 0x20001, 0x4040001, 0x3040001);
+ set_crtc(&plane_state, -2, -2, 1028, 772);
+ ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+ DRM_PLANE_NO_SCALING,
+ 0x10001,
+ false, false);
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0x40002, 0x40002,
+ 1024 << 16, 768 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+
+ set_src(&plane_state, 0, 0, 0x3ffff, 0x3ffff);
+ set_crtc(&plane_state, 1022, 766, 4, 4);
+ ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+ 0xffff,
+ DRM_PLANE_NO_SCALING,
+ true, false);
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ /* Should not be rounded to 0x20001, which would be upscaling. */
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0, 0, 2 << 16, 2 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 1022, 766, 2, 2));
+
+ set_src(&plane_state, 0x1ffff, 0x1ffff, 0x403ffff, 0x303ffff);
+ set_crtc(&plane_state, -2, -2, 1028, 772);
+ ret = drm_atomic_helper_check_plane_state(&plane_state, &crtc_state,
+ 0xffff,
+ DRM_PLANE_NO_SCALING,
+ false, false);
+ KUNIT_EXPECT_FALSE_MSG(test, ret, 0, "Should succeed by clipping to exact multiple");
+ KUNIT_EXPECT_TRUE(test, plane_state.visible);
+ KUNIT_EXPECT_TRUE(test, check_src_eq(&plane_state, 0x3fffe, 0x3fffe,
+ 1024 << 16, 768 << 16));
+ KUNIT_EXPECT_TRUE(test, check_crtc_eq(&plane_state, 0, 0, 1024, 768));
+}
+
+static struct kunit_case drm_plane_helper_test[] = {
+ KUNIT_CASE(drm_test_check_plane_state),
+ {}
+};
+
+static struct kunit_suite drm_plane_helper_test_suite = {
+ .name = "drm_plane_helper",
+ .test_cases = drm_plane_helper_test,
+};
+
+kunit_test_suite(drm_plane_helper_test_suite);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tests/drm_rect_test.c b/drivers/gpu/drm/tests/drm_rect_test.c
new file mode 100644
index 000000000..e9809ea32
--- /dev/null
+++ b/drivers/gpu/drm/tests/drm_rect_test.c
@@ -0,0 +1,214 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Test cases for the drm_rect functions
+ *
+ * Copyright (c) 2022 Maíra Canal <mairacanal@riseup.net>
+ */
+
+#include <kunit/test.h>
+
+#include <drm/drm_rect.h>
+
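+/*
+ * In these tests the source rectangles are given in 16.16 fixed point while
+ * the destination and clip rectangles are in whole pixels;
+ * drm_rect_clip_scaled() clips dst against clip and shrinks src by the
+ * correspondingly scaled amount, e.g. clipping a 2x2 dst to its top-left
+ * pixel halves a (2 << 16)-wide src (the 1:1 top/left clip case below).
+ */
+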
+static void drm_test_rect_clip_scaled_div_by_zero(struct kunit *test)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /*
+ * Make sure we don't divide by zero when dst
+ * width/height is zero and dst and clip do not intersect.
+ */
+ drm_rect_init(&src, 0, 0, 0, 0);
+ drm_rect_init(&dst, 0, 0, 0, 0);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, visible, "Destination not be visible\n");
+ KUNIT_EXPECT_FALSE_MSG(test, drm_rect_visible(&src), "Source should not be visible\n");
+
+ drm_rect_init(&src, 0, 0, 0, 0);
+ drm_rect_init(&dst, 3, 3, 0, 0);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, visible, "Destination not be visible\n");
+ KUNIT_EXPECT_FALSE_MSG(test, drm_rect_visible(&src), "Source should not be visible\n");
+}
+
+static void drm_test_rect_clip_scaled_not_clipped(struct kunit *test)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /* 1:1 scaling */
+ drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
+ drm_rect_init(&dst, 0, 0, 1, 1);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 2:1 scaling */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 1, 1);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 2 << 16 ||
+ src.y1 != 0 || src.y2 != 2 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:2 scaling */
+ drm_rect_init(&src, 0, 0, 1 << 16, 1 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 0, 0, 2, 2);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 2 ||
+ dst.y1 != 0 || dst.y2 != 2, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+}
+
+static void drm_test_rect_clip_scaled_clipped(struct kunit *test)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /* 1:1 scaling top/left clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 1 ||
+ dst.y1 != 0 || dst.y2 != 1, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:1 scaling bottom/right clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
+ src.y1 != 1 << 16 || src.y2 != 2 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 1 || dst.x2 != 2 || dst.y1 != 1 ||
+ dst.y2 != 2, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 2:1 scaling top/left clip */
+ drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 0, 0, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 2 << 16 ||
+ src.y1 != 0 || src.y2 != 2 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 1 || dst.y1 != 0 ||
+ dst.y2 != 1, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 2:1 scaling bottom/right clip */
+ drm_rect_init(&src, 0, 0, 4 << 16, 4 << 16);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 1, 1, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 2 << 16 || src.x2 != 4 << 16 ||
+ src.y1 != 2 << 16 || src.y2 != 4 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 1 || dst.x2 != 2 || dst.y1 != 1 ||
+ dst.y2 != 2, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:2 scaling top/left clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 4, 4);
+ drm_rect_init(&clip, 0, 0, 2, 2);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 0 || src.x2 != 1 << 16 ||
+ src.y1 != 0 || src.y2 != 1 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 0 || dst.x2 != 2 || dst.y1 != 0 ||
+ dst.y2 != 2, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+
+ /* 1:2 scaling bottom/right clip */
+ drm_rect_init(&src, 0, 0, 2 << 16, 2 << 16);
+ drm_rect_init(&dst, 0, 0, 4, 4);
+ drm_rect_init(&clip, 2, 2, 2, 2);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, src.x1 != 1 << 16 || src.x2 != 2 << 16 ||
+ src.y1 != 1 << 16 || src.y2 != 2 << 16, "Source badly clipped\n");
+ KUNIT_EXPECT_FALSE_MSG(test, dst.x1 != 2 || dst.x2 != 4 || dst.y1 != 2 ||
+ dst.y2 != 4, "Destination badly clipped\n");
+ KUNIT_EXPECT_TRUE_MSG(test, visible, "Destination should be visible\n");
+ KUNIT_EXPECT_TRUE_MSG(test, drm_rect_visible(&src), "Source should be visible\n");
+}
+
+static void drm_test_rect_clip_scaled_signed_vs_unsigned(struct kunit *test)
+{
+ struct drm_rect src, dst, clip;
+ bool visible;
+
+ /*
+ * 'clip.x2 - dst.x1 >= dst width' could result in a negative
+ * src rectangle width which is no longer expected by the
+ * code as it's using unsigned types. This could lead to
+ * the clipped source rectangle appearing visible when it
+ * should have been fully clipped. Make sure both rectangles
+ * end up invisible.
+ */
+ drm_rect_init(&src, 0, 0, INT_MAX, INT_MAX);
+ drm_rect_init(&dst, 0, 0, 2, 2);
+ drm_rect_init(&clip, 3, 3, 1, 1);
+
+ visible = drm_rect_clip_scaled(&src, &dst, &clip);
+
+ KUNIT_EXPECT_FALSE_MSG(test, visible, "Destination should not be visible\n");
+ KUNIT_EXPECT_FALSE_MSG(test, drm_rect_visible(&src), "Source should not be visible\n");
+}
+
+static struct kunit_case drm_rect_tests[] = {
+ KUNIT_CASE(drm_test_rect_clip_scaled_div_by_zero),
+ KUNIT_CASE(drm_test_rect_clip_scaled_not_clipped),
+ KUNIT_CASE(drm_test_rect_clip_scaled_clipped),
+ KUNIT_CASE(drm_test_rect_clip_scaled_signed_vs_unsigned),
+ { }
+};
+
+static struct kunit_suite drm_rect_test_suite = {
+ .name = "drm_rect",
+ .test_cases = drm_rect_tests,
+};
+
+kunit_test_suite(drm_rect_test_suite);
+
+MODULE_LICENSE("GPL");