path: root/arch/s390/include/asm/kfence.h
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef _ASM_S390_KFENCE_H
#define _ASM_S390_KFENCE_H

#include <linux/mm.h>
#include <linux/kfence.h>
#include <asm/set_memory.h>
#include <asm/page.h>

void __kernel_map_pages(struct page *page, int numpages, int enable);

/* The pool is split into 4k mappings earlier, see kfence_split_mapping(). */
static __always_inline bool arch_kfence_init_pool(void)
{
	return true;
}

#define arch_kfence_test_address(addr) ((addr) & PAGE_MASK)

/*
 * Do not split the kfence pool into 4k mappings in arch_kfence_init_pool(),
 * but earlier, while page table allocations still happen with memblock.
 * The reason is that arch_kfence_init_pool() gets called when the system
 * is still in a limbo state: disabling and enabling bottom halves is
 * not yet allowed, but that is what our page_table_alloc() would do.
 */
static __always_inline void kfence_split_mapping(void)
{
#ifdef CONFIG_KFENCE
	unsigned long pool_pages = KFENCE_POOL_SIZE >> PAGE_SHIFT;

	set_memory_4k((unsigned long)__kfence_pool, pool_pages);
#endif
}
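
#if 0
/*
 * Illustrative sketch only, not part of this header: how an early-boot
 * caller might use kfence_split_mapping(). It has to run while memblock
 * still backs page table allocations, i.e. before kfence_init() and well
 * before arch_kfence_init_pool() would be reached (see the comment above).
 * The function name below is a hypothetical placeholder, not an existing
 * kernel symbol.
 */
static void __init example_early_vmem_setup(void)
{
	/* ... set up kernel mappings with memblock-backed page tables ... */

	/* Split the KFENCE pool into 4k pages while memblock is still active. */
	kfence_split_mapping();
}
#endif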

/*
 * Toggle protection by mapping or unmapping the page in the kernel page
 * tables; accessing an unmapped page triggers the fault KFENCE relies on.
 */
static inline bool kfence_protect_page(unsigned long addr, bool protect)
{
	__kernel_map_pages(virt_to_page((void *)addr), 1, !protect);
	return true;
}
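
#if 0
/*
 * Illustrative sketch only, not part of this header: roughly how the
 * generic KFENCE code is expected to drive this hook. Protecting a page
 * makes subsequent accesses fault so KFENCE can report them; unprotecting
 * restores access when the slot is handed out again. The helper below is
 * a hypothetical example, not the actual KFENCE core implementation.
 */
static bool example_toggle_guard(unsigned long addr, bool protect)
{
	/* Page-align the address and flip the mapping for that single page. */
	return kfence_protect_page(addr & PAGE_MASK, protect);
}
#endif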

#endif /* _ASM_S390_KFENCE_H */