/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_FS_IO_PAGECACHE_H
#define _BCACHEFS_FS_IO_PAGECACHE_H

#include <linux/pagemap.h>

typedef DARRAY(struct folio *) folios;

int bch2_filemap_get_contig_folios_d(struct address_space *, loff_t,
				     u64, fgf_t, gfp_t, folios *);
int bch2_write_invalidate_inode_pages_range(struct address_space *, loff_t, loff_t);

/*
 * Use u64 for the end pos and sector helpers because if the folio covers the
 * max supported range of the mapping, the start offset of the next folio
 * overflows loff_t. This breaks much of the range based processing in the
 * buffered write path.
 */
static inline u64 folio_end_pos(struct folio *folio)
{
	return folio_pos(folio) + folio_size(folio);
}

static inline size_t folio_sectors(struct folio *folio)
{
	return PAGE_SECTORS << folio_order(folio);
}

static inline loff_t folio_sector(struct folio *folio)
{
	return folio_pos(folio) >> 9;
}

static inline u64 folio_end_sector(struct folio *folio)
{
	return folio_end_pos(folio) >> 9;
}

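/*
 * For example: loff_t is a signed 64-bit type, so a folio whose last byte is
 * at offset 2^63 - 1 has an end pos of 2^63, which goes negative if stored in
 * a loff_t but is still representable as a u64 - hence the unsigned return
 * types above.
 */
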
#define BCH_FOLIO_SECTOR_STATE()	\
	x(unallocated)			\
	x(reserved)			\
	x(dirty)			\
	x(dirty_reserved)		\
	x(allocated)

enum bch_folio_sector_state {
#define x(n)	SECTOR_##n,
	BCH_FOLIO_SECTOR_STATE()
#undef x
};

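/*
 * The x-macro above expands to SECTOR_unallocated, SECTOR_reserved,
 * SECTOR_dirty, SECTOR_dirty_reserved and SECTOR_allocated - the per-sector
 * allocation states tracked in struct bch_folio below.
 */
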
struct bch_folio_sector {
	/* Uncompressed, fully allocated replicas (or on disk reservation): */
	unsigned		nr_replicas:4;

	/* Owns a PAGE_SECTORS * replicas_reserved sized in-memory reservation: */
	unsigned		replicas_reserved:4;

	/* i_sectors: */
	enum bch_folio_sector_state state:8;
};

struct bch_folio {
	spinlock_t		lock;
	atomic_t		write_count;
	/*
	 * Is the sector state up to date with the btree?
	 * (Not the data itself)
	 */
	bool			uptodate;
	struct bch_folio_sector	s[];
};

/* Helper for when we need to add debug instrumentation: */
static inline void bch2_folio_sector_set(struct folio *folio,
					 struct bch_folio *s,
					 unsigned i, unsigned n)
{
	s->s[i].state = n;
}

/* file offset (to folio offset) to bch_folio_sector index */
static inline int folio_pos_to_s(struct folio *folio, loff_t pos)
{
	u64 f_offset = pos - folio_pos(folio);

	BUG_ON(pos < folio_pos(folio) || pos >= folio_end_pos(folio));

	return f_offset >> SECTOR_SHIFT;
}

/* for newly allocated folios: */
static inline void __bch2_folio_release(struct folio *folio)
{
	kfree(folio_detach_private(folio));
}

static inline void bch2_folio_release(struct folio *folio)
{
	EBUG_ON(!folio_test_locked(folio));
	__bch2_folio_release(folio);
}

static inline struct bch_folio *__bch2_folio(struct folio *folio)
{
	return folio_has_private(folio)
		? (struct bch_folio *) folio_get_private(folio)
		: NULL;
}

static inline struct bch_folio *bch2_folio(struct folio *folio)
{
	EBUG_ON(!folio_test_locked(folio));
	return __bch2_folio(folio);
}

struct bch_folio *__bch2_folio_create(struct folio *, gfp_t);
struct bch_folio *bch2_folio_create(struct folio *, gfp_t);

struct bch2_folio_reservation {
	struct disk_reservation	disk;
	struct quota_res	quota;
};

static inline unsigned inode_nr_replicas(struct bch_fs *c, struct bch_inode_info *inode)
{
	/* XXX: this should not be open coded */
	return inode->ei_inode.bi_data_replicas
		? inode->ei_inode.bi_data_replicas - 1
		: c->opts.data_replicas;
}

static inline void bch2_folio_reservation_init(struct bch_fs *c,
					       struct bch_inode_info *inode,
					       struct bch2_folio_reservation *res)
{
	memset(res, 0, sizeof(*res));
	res->disk.nr_replicas = inode_nr_replicas(c, inode);
}

int bch2_folio_set(struct bch_fs *, subvol_inum, struct folio **, unsigned);
void bch2_bio_page_state_set(struct bio *, struct bkey_s_c);

void bch2_mark_pagecache_unallocated(struct bch_inode_info *, u64, u64);
int bch2_mark_pagecache_reserved(struct bch_inode_info *, u64 *, u64, bool);

int bch2_get_folio_disk_reservation(struct bch_fs *,
				    struct bch_inode_info *,
				    struct folio *, bool);

void bch2_folio_reservation_put(struct bch_fs *,
				struct bch_inode_info *,
				struct bch2_folio_reservation *);
int bch2_folio_reservation_get(struct bch_fs *,
			       struct bch_inode_info *,
			       struct folio *,
			       struct bch2_folio_reservation *,
			       unsigned, unsigned);

void bch2_set_folio_dirty(struct bch_fs *,
			  struct bch_inode_info *,
			  struct folio *,
			  struct bch2_folio_reservation *,
			  unsigned, unsigned);

vm_fault_t bch2_page_fault(struct vm_fault *);
vm_fault_t bch2_page_mkwrite(struct vm_fault *);
void bch2_invalidate_folio(struct folio *, size_t, size_t);
bool bch2_release_folio(struct folio *, gfp_t);

loff_t bch2_seek_pagecache_data(struct inode *, loff_t, loff_t, unsigned, bool);
loff_t bch2_seek_pagecache_hole(struct inode *, loff_t, loff_t, unsigned, bool);
int bch2_clamp_data_hole(struct inode *, u64 *, u64 *, unsigned, bool);

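/*
 * Rough sketch of how the reservation helpers fit together in the buffered
 * write path (the authoritative sequence lives in the callers, e.g.
 * fs-io-buffered.c): bch2_folio_reservation_init() sets up an empty
 * reservation sized for the inode's replication level,
 * bch2_folio_reservation_get() charges disk and quota space for a byte range
 * of a folio, bch2_set_folio_dirty() transfers that reservation into the
 * folio's per-sector state, and bch2_folio_reservation_put() releases
 * whatever was not consumed.
 */
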
#endif /* _BCACHEFS_FS_IO_PAGECACHE_H */