From 3c21a2a16d681c2be78f257f660f1c8c1b50f7c3 Mon Sep 17 00:00:00 2001
From: Thomas Gleixner <tglx@linutronix.de>
Date: Tue, 3 Nov 2020 10:27:44 +0100
Subject: [PATCH 055/323] drm/vmwgfx: Replace kmap_atomic()
Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz

There is no reason to disable pagefaults and preemption as a side effect of
kmap_atomic_prot().

Use kmap_local_page_prot() instead and document the reasoning for the
mapping usage with the given pgprot.

Remove the NULL pointer check for the map. These functions return a valid
address for valid pages and the return was bogus anyway as it would have
left preemption and pagefaults disabled.
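
For illustration, a minimal sketch of the mapping pattern this patch
converts to. The helper and its arguments below are made up for the
example and are not part of the patch:

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Hypothetical helper, for illustration only: copy data into a page
 * through a short-lived local mapping with a caller-supplied pgprot.
 */
static void example_copy_to_page(struct page *page, pgprot_t prot,
				 const void *src, size_t len)
{
	/*
	 * kmap_local_page_prot() returns a valid address for a valid
	 * page, so no NULL check is needed. Unlike kmap_atomic_prot(),
	 * it leaves pagefaults and preemption enabled; the mapping is
	 * only valid in this thread context.
	 */
	void *dst = kmap_local_page_prot(page, prot);

	memcpy(dst, src, len);

	/* Drop the local mapping; dst must not be used afterwards. */
	kunmap_local(dst);
}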
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: VMware Graphics <linux-graphics-maintainer@vmware.com>
Cc: Roland Scheidegger <sroland@vmware.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: dri-devel@lists.freedesktop.org
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
---
 drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 30 +++++++++++-----------------
 1 file changed, 12 insertions(+), 18 deletions(-)

diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
index e8d66182cd7b..71dba228f68e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c
@@ -375,12 +375,12 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
 		copy_size = min_t(u32, copy_size, PAGE_SIZE - src_page_offset);
 
 		if (unmap_src) {
-			kunmap_atomic(d->src_addr);
+			kunmap_local(d->src_addr);
 			d->src_addr = NULL;
 		}
 
 		if (unmap_dst) {
-			kunmap_atomic(d->dst_addr);
+			kunmap_local(d->dst_addr);
 			d->dst_addr = NULL;
 		}
 
@@ -388,12 +388,8 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
 		if (WARN_ON_ONCE(dst_page >= d->dst_num_pages))
 			return -EINVAL;
 
-		d->dst_addr =
-			kmap_atomic_prot(d->dst_pages[dst_page],
-					 d->dst_prot);
-		if (!d->dst_addr)
-			return -ENOMEM;
-
+		d->dst_addr = kmap_local_page_prot(d->dst_pages[dst_page],
+						   d->dst_prot);
 		d->mapped_dst = dst_page;
 	}
 
@@ -401,12 +397,8 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
 		if (WARN_ON_ONCE(src_page >= d->src_num_pages))
 			return -EINVAL;
 
-		d->src_addr =
-			kmap_atomic_prot(d->src_pages[src_page],
-					 d->src_prot);
-		if (!d->src_addr)
-			return -ENOMEM;
-
+		d->src_addr = kmap_local_page_prot(d->src_pages[src_page],
+						   d->src_prot);
 		d->mapped_src = src_page;
 	}
 	diff->do_cpy(diff, d->dst_addr + dst_page_offset,
@@ -436,8 +428,10 @@ static int vmw_bo_cpu_blit_line(struct vmw_bo_blit_line_data *d,
  *
  * Performs a CPU blit from one buffer object to another avoiding a full
  * bo vmap which may exhaust- or fragment vmalloc space.
- * On supported architectures (x86), we're using kmap_atomic which avoids
- * cross-processor TLB- and cache flushes and may, on non-HIGHMEM systems
+ *
+ * On supported architectures (x86), we're using kmap_local_prot() which
+ * avoids cross-processor TLB- and cache flushes. kmap_local_prot() will
+ * either map a highmem page with the proper pgprot on HIGHMEM=y systems or
  * reference already set-up mappings.
  *
  * Neither of the buffer objects may be placed in PCI memory
@@ -500,9 +494,9 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst,
 	}
 out:
 	if (d.src_addr)
-		kunmap_atomic(d.src_addr);
+		kunmap_local(d.src_addr);
 	if (d.dst_addr)
-		kunmap_atomic(d.dst_addr);
+		kunmap_local(d.dst_addr);
 
 	return ret;
 }
--
2.43.0