author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:40:19 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:40:19 +0000
commit     9f0fc191371843c4fc000a226b0a26b6c059aacd
tree       35f8be3ef04506ac891ad001e8c41e535ae8d01d /mm/hugetlb_vmemmap.h
parent     Releasing progress-linux version 6.6.15-2~progress7.99u1.
Merging upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'mm/hugetlb_vmemmap.h')
-rw-r--r--  mm/hugetlb_vmemmap.h | 31 ++++++++++++++++++++++++-------
1 file changed, 24 insertions(+), 7 deletions(-)
diff --git a/mm/hugetlb_vmemmap.h b/mm/hugetlb_vmemmap.h
index 25bd0e0024..2fcae92d33 100644
--- a/mm/hugetlb_vmemmap.h
+++ b/mm/hugetlb_vmemmap.h
@@ -10,15 +10,20 @@
 #define _LINUX_HUGETLB_VMEMMAP_H
 #include <linux/hugetlb.h>
 
-#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
-int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head);
-void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head);
-
 /*
  * Reserve one vmemmap page, all vmemmap addresses are mapped to it. See
- * Documentation/vm/vmemmap_dedup.rst.
+ * Documentation/mm/vmemmap_dedup.rst.
  */
 #define HUGETLB_VMEMMAP_RESERVE_SIZE	PAGE_SIZE
+#define HUGETLB_VMEMMAP_RESERVE_PAGES	(HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page))
+
+#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
+int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio);
+long hugetlb_vmemmap_restore_folios(const struct hstate *h,
+			struct list_head *folio_list,
+			struct list_head *non_hvo_folios);
+void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio);
+void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);
 
 static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
 {
@@ -38,12 +43,24 @@ static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate
 	return size > 0 ? size : 0;
 }
 #else
-static inline int hugetlb_vmemmap_restore(const struct hstate *h, struct page *head)
+static inline int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
 {
 	return 0;
 }
 
-static inline void hugetlb_vmemmap_optimize(const struct hstate *h, struct page *head)
+static long hugetlb_vmemmap_restore_folios(const struct hstate *h,
+					struct list_head *folio_list,
+					struct list_head *non_hvo_folios)
+{
+	list_splice_init(folio_list, non_hvo_folios);
+	return 0;
+}
+
+static inline void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
+{
+}
+
+static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
 {
 }
 
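Reading aid (not part of the patch): the header now declares a batched, folio-based interface next to the per-folio calls. The sketch below shows one plausible calling pattern under stated assumptions; the function name demo_flush_batch, the pr_warn message, and the assumption that a negative return from hugetlb_vmemmap_restore_folios signals an error are illustrative and are not taken from mm/hugetlb.c.

/*
 * Illustrative sketch only -- not code from this commit.
 *
 * demo_flush_batch() drives the batched interface declared above:
 * optimize vmemmap for a whole list of hugetlb folios in one call,
 * then restore vmemmap for the list before the folios are handed back.
 * Per the !CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP stub, folios that end
 * up with normal (non-HVO) vmemmap are moved to non_hvo_folios.
 */
#include <linux/list.h>
#include <linux/printk.h>
#include <linux/hugetlb.h>
#include "hugetlb_vmemmap.h"

static void demo_flush_batch(struct hstate *h, struct list_head *folio_list)
{
	LIST_HEAD(non_hvo_folios);
	long ret;

	/* One call covers the whole list instead of one call per folio. */
	hugetlb_vmemmap_optimize_folios(h, folio_list);

	/*
	 * Undo the optimization for every folio on the list; folios left
	 * with normal vmemmap are spliced onto non_hvo_folios. A negative
	 * return is treated as an error here (assumption for this sketch).
	 */
	ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
	if (ret < 0)
		pr_warn("vmemmap restore failed for some folios: %ld\n", ret);
}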