Diffstat (limited to 'debian/patches-rt/0050-x86-crashdump-32-Simplify-copy_oldmem_page.patch')
-rw-r--r--  debian/patches-rt/0050-x86-crashdump-32-Simplify-copy_oldmem_page.patch  99
1 file changed, 99 insertions(+), 0 deletions(-)
diff --git a/debian/patches-rt/0050-x86-crashdump-32-Simplify-copy_oldmem_page.patch b/debian/patches-rt/0050-x86-crashdump-32-Simplify-copy_oldmem_page.patch
new file mode 100644
index 000000000..6f57ef059
--- /dev/null
+++ b/debian/patches-rt/0050-x86-crashdump-32-Simplify-copy_oldmem_page.patch
@@ -0,0 +1,99 @@
+From 9d9dd47b77d859a1261ac0dc98d94018bb5bb6a0 Mon Sep 17 00:00:00 2001
+From: Thomas Gleixner <tglx@linutronix.de>
+Date: Tue, 3 Nov 2020 10:27:39 +0100
+Subject: [PATCH 050/323] x86/crashdump/32: Simplify copy_oldmem_page()
+Origin: https://www.kernel.org/pub/linux/kernel/projects/rt/5.10/older/patches-5.10.204-rt100.tar.xz
+
+Replace kmap_atomic_pfn() with kmap_local_pfn() which is preemptible and
+can take page faults.
+
+Remove the indirection of the dump page and the related cruft which is no
+longer required.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
+---
+ arch/x86/kernel/crash_dump_32.c | 48 +++++++--------------------------
+ 1 file changed, 10 insertions(+), 38 deletions(-)
+
+diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
+index 33ee47670b99..5fcac46aaf6b 100644
+--- a/arch/x86/kernel/crash_dump_32.c
++++ b/arch/x86/kernel/crash_dump_32.c
+@@ -13,8 +13,6 @@
+
+ #include <linux/uaccess.h>
+
+-static void *kdump_buf_page;
+-
+ static inline bool is_crashed_pfn_valid(unsigned long pfn)
+ {
+ #ifndef CONFIG_X86_PAE
+@@ -41,15 +39,11 @@ static inline bool is_crashed_pfn_valid(unsigned long pfn)
+ * @userbuf: if set, @buf is in user address space, use copy_to_user(),
+ * otherwise @buf is in kernel address space, use memcpy().
+ *
+- * Copy a page from "oldmem". For this page, there is no pte mapped
+- * in the current kernel. We stitch up a pte, similar to kmap_atomic.
+- *
+- * Calling copy_to_user() in atomic context is not desirable. Hence first
+- * copying the data to a pre-allocated kernel page and then copying to user
+- * space in non-atomic context.
++ * Copy a page from "oldmem". For this page, there might be no pte mapped
++ * in the current kernel.
+ */
+-ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+- size_t csize, unsigned long offset, int userbuf)
++ssize_t copy_oldmem_page(unsigned long pfn, char *buf, size_t csize,
++ unsigned long offset, int userbuf)
+ {
+ void *vaddr;
+
+@@ -59,38 +53,16 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
+ if (!is_crashed_pfn_valid(pfn))
+ return -EFAULT;
+
+- vaddr = kmap_atomic_pfn(pfn);
++ vaddr = kmap_local_pfn(pfn);
+
+ if (!userbuf) {
+- memcpy(buf, (vaddr + offset), csize);
+- kunmap_atomic(vaddr);
++ memcpy(buf, vaddr + offset, csize);
+ } else {
+- if (!kdump_buf_page) {
+- printk(KERN_WARNING "Kdump: Kdump buffer page not"
+- " allocated\n");
+- kunmap_atomic(vaddr);
+- return -EFAULT;
+- }
+- copy_page(kdump_buf_page, vaddr);
+- kunmap_atomic(vaddr);
+- if (copy_to_user(buf, (kdump_buf_page + offset), csize))
+- return -EFAULT;
++ if (copy_to_user(buf, vaddr + offset, csize))
++ csize = -EFAULT;
+ }
+
+- return csize;
+-}
++ kunmap_local(vaddr);
+
+-static int __init kdump_buf_page_init(void)
+-{
+- int ret = 0;
+-
+- kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
+- if (!kdump_buf_page) {
+- printk(KERN_WARNING "Kdump: Failed to allocate kdump buffer"
+- " page\n");
+- ret = -ENOMEM;
+- }
+-
+- return ret;
++ return csize;
+ }
+-arch_initcall(kdump_buf_page_init);
+--
+2.43.0
+
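For readers unfamiliar with the two mapping APIs, the essence of the change is that kmap_local_pfn() mappings are CPU-local but preemptible, so the mapped page can be handed straight to copy_to_user() even though that call may fault and sleep; kmap_atomic_pfn() forbade sleeping, which is why the old code bounced the data through a pre-allocated kernel page first. Below is a minimal sketch of the resulting pattern, assuming the kernel's kmap_local_pfn()/kunmap_local() and copy_to_user() interfaces; the helper name copy_pfn_to_user() is illustrative and not taken from the patch.

#include <linux/highmem.h>
#include <linux/uaccess.h>

/*
 * Illustrative helper (hypothetical name, not from the patch): copy
 * part of one page frame to a user buffer via a local kmap. Because
 * kmap_local_pfn() returns a preemptible mapping, copy_to_user() may
 * fault and sleep here, which kmap_atomic_pfn() did not permit.
 */
static ssize_t copy_pfn_to_user(unsigned long pfn, char __user *ubuf,
				size_t csize, unsigned long offset)
{
	void *vaddr = kmap_local_pfn(pfn);	/* preemptible, CPU-local */
	ssize_t ret = csize;

	if (copy_to_user(ubuf, vaddr + offset, csize))
		ret = -EFAULT;			/* some bytes not copied */

	kunmap_local(vaddr);			/* unmap in LIFO order */
	return ret;
}

Note how the error handling mirrors the patched copy_oldmem_page(): kunmap_local() is reached on both the success and failure paths, so the mapping is never leaked, whereas the old code had to duplicate kunmap_atomic() before each early return.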