path: root/fs/f2fs/data.c
author     Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-18 18:47:48 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-18 18:47:48 +0000
commit     a1865fbd182b17f2d2f465f557af5b45501c5f1c (patch)
tree       59da519ef2e59c763bb8efdbe67bc348cf833767 /fs/f2fs/data.c
parent     Adding upstream version 6.7.9. (diff)
download   linux-a1865fbd182b17f2d2f465f557af5b45501c5f1c.tar.xz
           linux-a1865fbd182b17f2d2f465f557af5b45501c5f1c.zip

Adding upstream version 6.7.12. (upstream/6.7.12)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'fs/f2fs/data.c')
-rw-r--r--  fs/f2fs/data.c  |  73
1 file changed, 30 insertions(+), 43 deletions(-)
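
Note: the hunks below switch every writeback accounting call from the one-argument WB_DATA_TYPE(page) to a two-argument WB_DATA_TYPE(page, f), and export the former static __is_cp_guaranteed() as f2fs_is_cp_guaranteed(). The matching fs/f2fs/f2fs.h hunk is outside this diffstat (limited to fs/f2fs/data.c); the sketch below is only an assumed shape of that header change, included so the new call sites, which pass false or fio->compressed_page as the second argument, read naturally.

/*
 * Sketch only -- assumed companion change in fs/f2fs/f2fs.h, not part
 * of this diff.  A compressed page (f != 0) is counted directly as
 * checkpoint-guaranteed writeback, instead of __is_cp_guaranteed()
 * probing the page itself via f2fs_is_compressed_page().
 */
bool f2fs_is_cp_guaranteed(struct page *page);	/* was static __is_cp_guaranteed() */

#define WB_DATA_TYPE(p, f)	\
	((f) || f2fs_is_cp_guaranteed(p) ? F2FS_WB_CP_DATA : F2FS_WB_DATA)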
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index bc3f05d43b..c611d064ae 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -48,7 +48,7 @@ void f2fs_destroy_bioset(void)
bioset_exit(&f2fs_bioset);
}
-static bool __is_cp_guaranteed(struct page *page)
+bool f2fs_is_cp_guaranteed(struct page *page)
{
struct address_space *mapping = page->mapping;
struct inode *inode;
@@ -65,8 +65,6 @@ static bool __is_cp_guaranteed(struct page *page)
S_ISDIR(inode->i_mode))
return true;
- if (f2fs_is_compressed_page(page))
- return false;
if ((S_ISREG(inode->i_mode) && IS_NOQUOTA(inode)) ||
page_private_gcing(page))
return true;
@@ -338,7 +336,7 @@ static void f2fs_write_end_io(struct bio *bio)
bio_for_each_segment_all(bvec, bio, iter_all) {
struct page *page = bvec->bv_page;
- enum count_type type = WB_DATA_TYPE(page);
+ enum count_type type = WB_DATA_TYPE(page, false);
if (page_private_dummy(page)) {
clear_page_private_dummy(page);
@@ -762,7 +760,7 @@ int f2fs_submit_page_bio(struct f2fs_io_info *fio)
wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
inc_page_count(fio->sbi, is_read_io(fio->op) ?
- __read_io_type(page) : WB_DATA_TYPE(fio->page));
+ __read_io_type(page) : WB_DATA_TYPE(fio->page, false));
if (is_read_io(bio_op(bio)))
f2fs_submit_read_bio(fio->sbi, bio, fio->type);
@@ -973,7 +971,7 @@ alloc_new:
if (fio->io_wbc)
wbc_account_cgroup_owner(fio->io_wbc, fio->page, PAGE_SIZE);
- inc_page_count(fio->sbi, WB_DATA_TYPE(page));
+ inc_page_count(fio->sbi, WB_DATA_TYPE(page, false));
*fio->last_block = fio->new_blkaddr;
*fio->bio = bio;
@@ -1007,11 +1005,12 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
struct f2fs_bio_info *io = sbi->write_io[btype] + fio->temp;
struct page *bio_page;
+ enum count_type type;
f2fs_bug_on(sbi, is_read_io(fio->op));
f2fs_down_write(&io->io_rwsem);
-
+next:
#ifdef CONFIG_BLK_DEV_ZONED
if (f2fs_sb_has_blkzoned(sbi) && btype < META && io->zone_pending_bio) {
wait_for_completion_io(&io->zone_wait);
@@ -1021,7 +1020,6 @@ void f2fs_submit_page_write(struct f2fs_io_info *fio)
}
#endif
-next:
if (fio->in_list) {
spin_lock(&io->io_lock);
if (list_empty(&io->io_list)) {
@@ -1046,7 +1044,8 @@ next:
/* set submitted = true as a return value */
fio->submitted = 1;
- inc_page_count(sbi, WB_DATA_TYPE(bio_page));
+ type = WB_DATA_TYPE(bio_page, fio->compressed_page);
+ inc_page_count(sbi, type);
if (io->bio &&
(!io_is_mergeable(sbi, io->bio, io, fio, io->last_block_in_bio,
@@ -1059,7 +1058,8 @@ alloc_new:
if (F2FS_IO_ALIGNED(sbi) &&
(fio->type == DATA || fio->type == NODE) &&
fio->new_blkaddr & F2FS_IO_SIZE_MASK(sbi)) {
- dec_page_count(sbi, WB_DATA_TYPE(bio_page));
+ dec_page_count(sbi, WB_DATA_TYPE(bio_page,
+ fio->compressed_page));
fio->retry = 1;
goto skip;
}
@@ -1080,10 +1080,6 @@ alloc_new:
io->last_block_in_bio = fio->new_blkaddr;
trace_f2fs_submit_page_write(fio->page, fio);
-skip:
- if (fio->in_list)
- goto next;
-out:
#ifdef CONFIG_BLK_DEV_ZONED
if (f2fs_sb_has_blkzoned(sbi) && btype < META &&
is_end_zone_blkaddr(sbi, fio->new_blkaddr)) {
@@ -1096,6 +1092,10 @@ out:
__submit_merged_bio(io);
}
#endif
+skip:
+ if (fio->in_list)
+ goto next;
+out:
if (is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN) ||
!f2fs_is_checkpoint_ready(sbi))
__submit_merged_bio(io);
@@ -1179,18 +1179,12 @@ static int f2fs_submit_page_read(struct inode *inode, struct page *page,
return 0;
}
-static void __set_data_blkaddr(struct dnode_of_data *dn)
+static void __set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
- struct f2fs_node *rn = F2FS_NODE(dn->node_page);
- __le32 *addr_array;
- int base = 0;
-
- if (IS_INODE(dn->node_page) && f2fs_has_extra_attr(dn->inode))
- base = get_extra_isize(dn->inode);
+ __le32 *addr = get_dnode_addr(dn->inode, dn->node_page);
- /* Get physical address of data block */
- addr_array = blkaddr_in_node(rn);
- addr_array[base + dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
+ dn->data_blkaddr = blkaddr;
+ addr[dn->ofs_in_node] = cpu_to_le32(dn->data_blkaddr);
}
/*
@@ -1199,18 +1193,17 @@ static void __set_data_blkaddr(struct dnode_of_data *dn)
* ->node_page
* update block addresses in the node page
*/
-void f2fs_set_data_blkaddr(struct dnode_of_data *dn)
+void f2fs_set_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
f2fs_wait_on_page_writeback(dn->node_page, NODE, true, true);
- __set_data_blkaddr(dn);
+ __set_data_blkaddr(dn, blkaddr);
if (set_page_dirty(dn->node_page))
dn->node_changed = true;
}
void f2fs_update_data_blkaddr(struct dnode_of_data *dn, block_t blkaddr)
{
- dn->data_blkaddr = blkaddr;
- f2fs_set_data_blkaddr(dn);
+ f2fs_set_data_blkaddr(dn, blkaddr);
f2fs_update_read_extent_cache(dn);
}
@@ -1225,7 +1218,8 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
- if (unlikely((err = inc_valid_block_count(sbi, dn->inode, &count))))
+ err = inc_valid_block_count(sbi, dn->inode, &count, true);
+ if (unlikely(err))
return err;
trace_f2fs_reserve_new_blocks(dn->inode, dn->nid,
@@ -1237,8 +1231,7 @@ int f2fs_reserve_new_blocks(struct dnode_of_data *dn, blkcnt_t count)
block_t blkaddr = f2fs_data_blkaddr(dn);
if (blkaddr == NULL_ADDR) {
- dn->data_blkaddr = NEW_ADDR;
- __set_data_blkaddr(dn);
+ __set_data_blkaddr(dn, NEW_ADDR);
count--;
}
}
@@ -1483,7 +1476,7 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
dn->data_blkaddr = f2fs_data_blkaddr(dn);
if (dn->data_blkaddr == NULL_ADDR) {
- err = inc_valid_block_count(sbi, dn->inode, &count);
+ err = inc_valid_block_count(sbi, dn->inode, &count, true);
if (unlikely(err))
return err;
}
@@ -1492,11 +1485,9 @@ static int __allocate_data_block(struct dnode_of_data *dn, int seg_type)
old_blkaddr = dn->data_blkaddr;
f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
&sum, seg_type, NULL);
- if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
- invalidate_mapping_pages(META_MAPPING(sbi),
- old_blkaddr, old_blkaddr);
- f2fs_invalidate_compress_page(sbi, old_blkaddr);
- }
+ if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+ f2fs_invalidate_internal_cache(sbi, old_blkaddr);
+
f2fs_update_data_blkaddr(dn, dn->data_blkaddr);
return 0;
}
@@ -2811,8 +2802,6 @@ got_it:
f2fs_outplace_write_data(&dn, fio);
trace_f2fs_do_write_data_page(page, OPU);
set_inode_flag(inode, FI_APPEND_WRITE);
- if (page->index == 0)
- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
out_writepage:
f2fs_put_dnode(&dn);
out:
@@ -2850,7 +2839,7 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
.encrypted_page = NULL,
.submitted = 0,
.compr_blocks = compr_blocks,
- .need_lock = LOCK_RETRY,
+ .need_lock = compr_blocks ? LOCK_DONE : LOCK_RETRY,
.post_read = f2fs_post_read_required(inode) ? 1 : 0,
.io_type = io_type,
.io_wbc = wbc,
@@ -2895,9 +2884,6 @@ int f2fs_write_single_data_page(struct page *page, int *submitted,
zero_user_segment(page, offset, PAGE_SIZE);
write:
- if (f2fs_is_drop_cache(inode))
- goto out;
-
/* Dentry/quota blocks are controlled by checkpoint */
if (S_ISDIR(inode->i_mode) || quota_inode) {
/*
@@ -2934,6 +2920,7 @@ write:
if (err == -EAGAIN) {
err = f2fs_do_write_data_page(&fio);
if (err == -EAGAIN) {
+ f2fs_bug_on(sbi, compr_blocks);
fio.need_lock = LOCK_REQ;
err = f2fs_do_write_data_page(&fio);
}