path: root/arch/s390/include/asm/tlb.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLB_H
#define _S390_TLB_H

/*
 * TLB flushing on s390 is complicated. The following requirement
 * from the principles of operation is the most arduous:
 *
 * "A valid table entry must not be changed while it is attached
 * to any CPU and may be used for translation by that CPU except to
 * (1) invalidate the entry by using INVALIDATE PAGE TABLE ENTRY,
 * or INVALIDATE DAT TABLE ENTRY, (2) alter bits 56-63 of a page
 * table entry, or (3) make a change by means of a COMPARE AND SWAP
 * AND PURGE instruction that purges the TLB."
 *
 * The modification of a pte of an active mm struct therefore is
 * a two step process: i) invalidate the pte, ii) store the new pte.
 * This is true for the page protection bit as well (see the
 * illustrative sketch below the includes).
 * The only possible optimization is to flush at the beginning of
 * a tlb_gather_mmu cycle if the mm_struct is currently not in use.
 *
 * Pages used for the page tables are a different story. FIXME: more
 */

void __tlb_remove_table(void *_table);
static inline void tlb_flush(struct mmu_gather *tlb);
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
		struct page *page, bool delay_rmap, int page_size);
static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb,
		struct page *page, unsigned int nr_pages, bool delay_rmap);

#define tlb_flush tlb_flush
#define pte_free_tlb pte_free_tlb
#define pmd_free_tlb pmd_free_tlb
#define p4d_free_tlb p4d_free_tlb
#define pud_free_tlb pud_free_tlb

#include <asm/tlbflush.h>
#include <asm-generic/tlb.h>
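
/*
 * Illustrative sketch only (hypothetical example_wrprotect_pte(), not part
 * of this header): the two step update described above, made explicit for
 * write protecting a pte of an active mm. The caller is assumed to hold the
 * page table lock. The real s390 primitives, ptep_set_wrprotect() and
 * ptep_xchg_lazy() in arch/s390/include/asm/pgtable.h, perform both steps
 * within a single pte exchange.
 */
static inline void example_wrprotect_pte(struct mm_struct *mm,
					 unsigned long addr, pte_t *ptep)
{
	pte_t old;

	/* i) invalidate the attached entry (IPTE/CSP under the hood) */
	old = ptep_get_and_clear(mm, addr, ptep);
	/* ii) store the new pte value into the now invalid entry */
	set_pte_at(mm, addr, ptep, pte_wrprotect(old));
}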

/*
 * Release the page cache reference for a pte removed by tlb_remove_page().
 * In both flush modes the TLB entries for a page cache page have already
 * been flushed, so just do free_page_and_swap_cache.
 *
 * s390 doesn't delay rmap removal.
 */
static inline bool __tlb_remove_page_size(struct mmu_gather *tlb,
		struct page *page, bool delay_rmap, int page_size)
{
	VM_WARN_ON_ONCE(delay_rmap);

	free_page_and_swap_cache(page);
	return false;
}

static inline bool __tlb_remove_folio_pages(struct mmu_gather *tlb,
		struct page *page, unsigned int nr_pages, bool delay_rmap)
{
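	/*
	 * free_pages_and_swap_cache() consumes an encoded_page array:
	 * ENCODED_PAGE_BIT_NR_PAGES_NEXT flags the first entry so that the
	 * following entry is interpreted as the number of pages to free.
	 */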
	struct encoded_page *encoded_pages[] = {
		encode_page(page, ENCODED_PAGE_BIT_NR_PAGES_NEXT),
		encode_nr_pages(nr_pages),
	};

	VM_WARN_ON_ONCE(delay_rmap);
	VM_WARN_ON_ONCE(page_folio(page) != page_folio(page + nr_pages - 1));

	free_pages_and_swap_cache(encoded_pages, ARRAY_SIZE(encoded_pages));
	return false;
}

static inline void tlb_flush(struct mmu_gather *tlb)
{
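	/*
	 * __tlb_flush_mm_lazy() issues the flush only if
	 * tlb->mm->context.flush_mm is set; the *_free_tlb() helpers below
	 * set that flag when they queue a page table for freeing.
	 */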
	__tlb_flush_mm_lazy(tlb->mm);
}

/*
 * pte_free_tlb frees a pte table and clears the CRSTE for the
 * page table from the tlb.
 */
static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
                                unsigned long address)
{
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_pmds = 1;
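	/*
	 * For mms with pgste-backed page tables (KVM guests) the pte table
	 * must also be unlinked from the gmap structures before it is freed.
	 */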
	if (mm_alloc_pgste(tlb->mm))
		gmap_unlink(tlb->mm, (unsigned long *)pte, address);
	tlb_remove_ptdesc(tlb, pte);
}
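
/*
 * Illustrative sketch only (hypothetical example_free_pte_range(), modeled
 * on the generic free_pte_range() in mm/memory.c): the caller detaches the
 * pte table from the pmd entry first and only then hands it to
 * pte_free_tlb(), which queues it for the deferred flush and free.
 */
static inline void example_free_pte_range(struct mmu_gather *tlb, pmd_t *pmd,
					  unsigned long addr)
{
	pgtable_t token = pmd_pgtable(*pmd);

	pmd_clear(pmd);				/* detach the pte table */
	pte_free_tlb(tlb, token, addr);		/* queue it for freeing */
	mm_dec_nr_ptes(tlb->mm);
}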

/*
 * pmd_free_tlb frees a pmd table and clears the CRSTE for the
 * segment table entry from the tlb.
 * If the mm uses a two level page table the single pmd is freed
 * as the pgd. pmd_free_tlb checks the asce_limit against 2GB
 * to avoid the double free of the pmd in this case.
 */
static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				unsigned long address)
{
	if (mm_pmd_folded(tlb->mm))
		return;
	pagetable_pmd_dtor(virt_to_ptdesc(pmd));
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_puds = 1;
	tlb_remove_ptdesc(tlb, pmd);
}

/*
 * p4d_free_tlb frees a p4d table and clears the CRSTE for the
 * region second table entry from the tlb.
 * If the mm uses a four level page table the single p4d is freed
 * as the pgd. p4d_free_tlb checks the asce_limit against 8PB
 * to avoid the double free of the p4d in this case.
 */
static inline void p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				unsigned long address)
{
	if (mm_p4d_folded(tlb->mm))
		return;
	__tlb_adjust_range(tlb, address, PAGE_SIZE);
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb_remove_ptdesc(tlb, p4d);
}

/*
 * pud_free_tlb frees a pud table and clears the CRSTE for the
 * region third table entry from the tlb.
 * If the mm uses a three level page table the single pud is freed
 * as the pgd. pud_free_tlb checks the asce_limit against 4TB
 * to avoid the double free of the pud in this case.
 */
static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				unsigned long address)
{
	if (mm_pud_folded(tlb->mm))
		return;
	tlb->mm->context.flush_mm = 1;
	tlb->freed_tables = 1;
	tlb->cleared_p4ds = 1;
	tlb_remove_ptdesc(tlb, pud);
}
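
/*
 * Summary of the folding checks used above: mm_pmd_folded(), mm_pud_folded()
 * and mm_p4d_folded() compare the asce_limit of the mm against the region
 * and segment table sizes (see arch/s390/include/asm/pgtable.h). A folded
 * level shares its table with the pgd and must not be freed separately,
 * hence the early returns above.
 *
 *	levels	top-level table		asce_limit
 *	2	segment table		2GB
 *	3	region third table	4TB
 *	4	region second table	8PB
 *	5	region first table	16EB (approx.)
 */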


#endif /* _S390_TLB_H */