author      Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-18 17:35:05 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>    2024-05-18 17:39:31 +0000
commit      85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
tree        76267dbc9b9a130337be3640948fe397b04ac629 /fs/reiserfs/inode.c
parent      Adding upstream version 6.6.15. (diff)
Adding upstream version 6.7.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'fs/reiserfs/inode.c')
-rw-r--r--    fs/reiserfs/inode.c    106
1 file changed, 51 insertions, 55 deletions
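
The changes below fall into two groups: the timestamp hunks replace direct access to inode->i_atime/i_mtime with the accessor helpers added to the VFS in v6.7 (inode_set_atime(), inode_set_mtime(), inode_get_*_sec(), simple_inode_init_ts()), and reiserfs_write_full_page() is converted to operate on folios. As orientation only, here is a minimal sketch of the first pattern; it is not part of this commit, assumes <linux/fs.h> from v6.7+, and uses hypothetical helper names:

#include <linux/fs.h>

/* Hypothetical example: load on-disk times (whole seconds) into an inode. */
static void example_load_times(struct inode *inode, time64_t atime,
                               time64_t mtime, time64_t ctime)
{
        /* each call replaces an open-coded "tv_sec = ...; tv_nsec = 0;" pair */
        inode_set_atime(inode, atime, 0);
        inode_set_mtime(inode, mtime, 0);
        inode_set_ctime(inode, ctime, 0);
}

/* Hypothetical example: read the seconds back when packing stat data. */
static time64_t example_pack_mtime(struct inode *inode)
{
        return inode_get_mtime_sec(inode);
}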
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 86e55d4bb1..1d825459ee 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1257,11 +1257,9 @@ static void init_inode(struct inode *inode, struct treepath *path)
 		i_uid_write(inode, sd_v1_uid(sd));
 		i_gid_write(inode, sd_v1_gid(sd));
 		inode->i_size = sd_v1_size(sd);
-		inode->i_atime.tv_sec = sd_v1_atime(sd);
-		inode->i_mtime.tv_sec = sd_v1_mtime(sd);
+		inode_set_atime(inode, sd_v1_atime(sd), 0);
+		inode_set_mtime(inode, sd_v1_mtime(sd), 0);
 		inode_set_ctime(inode, sd_v1_ctime(sd), 0);
-		inode->i_atime.tv_nsec = 0;
-		inode->i_mtime.tv_nsec = 0;
 		inode->i_blocks = sd_v1_blocks(sd);
 		inode->i_generation = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
 
@@ -1311,11 +1309,9 @@ static void init_inode(struct inode *inode, struct treepath *path)
 		i_uid_write(inode, sd_v2_uid(sd));
 		inode->i_size = sd_v2_size(sd);
 		i_gid_write(inode, sd_v2_gid(sd));
-		inode->i_mtime.tv_sec = sd_v2_mtime(sd);
-		inode->i_atime.tv_sec = sd_v2_atime(sd);
+		inode_set_mtime(inode, sd_v2_mtime(sd), 0);
+		inode_set_atime(inode, sd_v2_atime(sd), 0);
 		inode_set_ctime(inode, sd_v2_ctime(sd), 0);
-		inode->i_mtime.tv_nsec = 0;
-		inode->i_atime.tv_nsec = 0;
 		inode->i_blocks = sd_v2_blocks(sd);
 		rdev = sd_v2_rdev(sd);
 		if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
@@ -1370,9 +1366,9 @@ static void inode2sd(void *sd, struct inode *inode, loff_t size)
 	set_sd_v2_uid(sd_v2, i_uid_read(inode));
 	set_sd_v2_size(sd_v2, size);
 	set_sd_v2_gid(sd_v2, i_gid_read(inode));
-	set_sd_v2_mtime(sd_v2, inode->i_mtime.tv_sec);
-	set_sd_v2_atime(sd_v2, inode->i_atime.tv_sec);
-	set_sd_v2_ctime(sd_v2, inode_get_ctime(inode).tv_sec);
+	set_sd_v2_mtime(sd_v2, inode_get_mtime_sec(inode));
+	set_sd_v2_atime(sd_v2, inode_get_atime_sec(inode));
+	set_sd_v2_ctime(sd_v2, inode_get_ctime_sec(inode));
 	set_sd_v2_blocks(sd_v2, to_fake_used_blocks(inode, SD_V2_SIZE));
 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
 		set_sd_v2_rdev(sd_v2, new_encode_dev(inode->i_rdev));
@@ -1391,9 +1387,9 @@ static void inode2sd_v1(void *sd, struct inode *inode, loff_t size)
 	set_sd_v1_gid(sd_v1, i_gid_read(inode));
 	set_sd_v1_nlink(sd_v1, inode->i_nlink);
 	set_sd_v1_size(sd_v1, size);
-	set_sd_v1_atime(sd_v1, inode->i_atime.tv_sec);
-	set_sd_v1_ctime(sd_v1, inode_get_ctime(inode).tv_sec);
-	set_sd_v1_mtime(sd_v1, inode->i_mtime.tv_sec);
+	set_sd_v1_atime(sd_v1, inode_get_atime_sec(inode));
+	set_sd_v1_ctime(sd_v1, inode_get_ctime_sec(inode));
+	set_sd_v1_mtime(sd_v1, inode_get_mtime_sec(inode));
 
 	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode))
 		set_sd_v1_rdev(sd_v1, new_encode_dev(inode->i_rdev));
@@ -1984,7 +1980,7 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
 
 	/* uid and gid must already be set by the caller for quota init */
 
-	inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode);
+	simple_inode_init_ts(inode);
 	inode->i_size = i_size;
 	inode->i_blocks = 0;
 	inode->i_bytes = 0;
@@ -2507,10 +2503,10 @@ out:
  * start/recovery path as __block_write_full_folio, along with special
  * code to handle reiserfs tails.
  */
-static int reiserfs_write_full_page(struct page *page,
+static int reiserfs_write_full_folio(struct folio *folio,
 				    struct writeback_control *wbc)
 {
-	struct inode *inode = page->mapping->host;
+	struct inode *inode = folio->mapping->host;
 	unsigned long end_index = inode->i_size >> PAGE_SHIFT;
 	int error = 0;
 	unsigned long block;
@@ -2518,7 +2514,7 @@ static int reiserfs_write_full_page(struct page *page,
 	struct buffer_head *head, *bh;
 	int partial = 0;
 	int nr = 0;
-	int checked = PageChecked(page);
+	int checked = folio_test_checked(folio);
 	struct reiserfs_transaction_handle th;
 	struct super_block *s = inode->i_sb;
 	int bh_per_page = PAGE_SIZE / s->s_blocksize;
@@ -2526,47 +2522,46 @@ static int reiserfs_write_full_page(struct page *page,
 
 	/* no logging allowed when nonblocking or from PF_MEMALLOC */
 	if (checked && (current->flags & PF_MEMALLOC)) {
-		redirty_page_for_writepage(wbc, page);
-		unlock_page(page);
+		folio_redirty_for_writepage(wbc, folio);
+		folio_unlock(folio);
 		return 0;
 	}
 
 	/*
-	 * The page dirty bit is cleared before writepage is called, which
+	 * The folio dirty bit is cleared before writepage is called, which
 	 * means we have to tell create_empty_buffers to make dirty buffers
-	 * The page really should be up to date at this point, so tossing
+	 * The folio really should be up to date at this point, so tossing
 	 * in the BH_Uptodate is just a sanity check.
 	 */
-	if (!page_has_buffers(page)) {
-		create_empty_buffers(page, s->s_blocksize,
+	head = folio_buffers(folio);
+	if (!head)
+		head = create_empty_buffers(folio, s->s_blocksize,
 				     (1 << BH_Dirty) | (1 << BH_Uptodate));
-	}
-	head = page_buffers(page);
 
 	/*
-	 * last page in the file, zero out any contents past the
+	 * last folio in the file, zero out any contents past the
 	 * last byte in the file
 	 */
-	if (page->index >= end_index) {
+	if (folio->index >= end_index) {
 		unsigned last_offset;
 
 		last_offset = inode->i_size & (PAGE_SIZE - 1);
-		/* no file contents in this page */
-		if (page->index >= end_index + 1 || !last_offset) {
-			unlock_page(page);
+		/* no file contents in this folio */
+		if (folio->index >= end_index + 1 || !last_offset) {
+			folio_unlock(folio);
 			return 0;
 		}
-		zero_user_segment(page, last_offset, PAGE_SIZE);
+		folio_zero_segment(folio, last_offset, folio_size(folio));
 	}
 	bh = head;
-	block = page->index << (PAGE_SHIFT - s->s_blocksize_bits);
+	block = folio->index << (PAGE_SHIFT - s->s_blocksize_bits);
 	last_block = (i_size_read(inode) - 1) >> inode->i_blkbits;
 	/* first map all the buffers, logging any direct items we find */
 	do {
 		if (block > last_block) {
 			/*
 			 * This can happen when the block size is less than
-			 * the page size.  The corresponding bytes in the page
+			 * the folio size.  The corresponding bytes in the folio
 			 * were zero filled above
 			 */
 			clear_buffer_dirty(bh);
@@ -2593,7 +2588,7 @@ static int reiserfs_write_full_page(struct page *page,
 	 * blocks we're going to log
 	 */
 	if (checked) {
-		ClearPageChecked(page);
+		folio_clear_checked(folio);
 		reiserfs_write_lock(s);
 		error = journal_begin(&th, s, bh_per_page + 1);
 		if (error) {
@@ -2602,7 +2597,7 @@ static int reiserfs_write_full_page(struct page *page,
 		}
 		reiserfs_update_inode_transaction(inode);
 	}
-	/* now go through and lock any dirty buffers on the page */
+	/* now go through and lock any dirty buffers on the folio */
 	do {
 		get_bh(bh);
 		if (!buffer_mapped(bh))
@@ -2623,7 +2618,7 @@ static int reiserfs_write_full_page(struct page *page,
 			lock_buffer(bh);
 		} else {
 			if (!trylock_buffer(bh)) {
-				redirty_page_for_writepage(wbc, page);
+				folio_redirty_for_writepage(wbc, folio);
 				continue;
 			}
 		}
@@ -2640,13 +2635,13 @@ static int reiserfs_write_full_page(struct page *page,
 		if (error)
 			goto fail;
 	}
-	BUG_ON(PageWriteback(page));
-	set_page_writeback(page);
-	unlock_page(page);
+	BUG_ON(folio_test_writeback(folio));
+	folio_start_writeback(folio);
+	folio_unlock(folio);
 
 	/*
-	 * since any buffer might be the only dirty buffer on the page,
-	 * the first submit_bh can bring the page out of writeback.
+	 * since any buffer might be the only dirty buffer on the folio,
+	 * the first submit_bh can bring the folio out of writeback.
 	 * be careful with the buffers.
 	 */
 	do {
@@ -2663,10 +2658,10 @@ static int reiserfs_write_full_page(struct page *page,
 done:
 	if (nr == 0) {
 		/*
-		 * if this page only had a direct item, it is very possible for
+		 * if this folio only had a direct item, it is very possible for
 		 * no io to be required without there being an error. Or,
 		 * someone else could have locked them and sent them down the
-		 * pipe without locking the page
+		 * pipe without locking the folio
 		 */
 		bh = head;
 		do {
@@ -2677,18 +2672,18 @@ done:
 			bh = bh->b_this_page;
 		} while (bh != head);
 		if (!partial)
-			SetPageUptodate(page);
-		end_page_writeback(page);
+			folio_mark_uptodate(folio);
+		folio_end_writeback(folio);
 	}
 	return error;
 
 fail:
 	/*
 	 * catches various errors, we need to make sure any valid dirty blocks
-	 * get to the media.  The page is currently locked and not marked for
+	 * get to the media.  The folio is currently locked and not marked for
 	 * writeback
 	 */
-	ClearPageUptodate(page);
+	folio_clear_uptodate(folio);
 	bh = head;
 	do {
 		get_bh(bh);
@@ -2698,16 +2693,16 @@ fail:
 		} else {
 			/*
 			 * clear any dirty bits that might have come from
-			 * getting attached to a dirty page
+			 * getting attached to a dirty folio
 			 */
 			clear_buffer_dirty(bh);
 		}
 		bh = bh->b_this_page;
 	} while (bh != head);
-	SetPageError(page);
-	BUG_ON(PageWriteback(page));
-	set_page_writeback(page);
-	unlock_page(page);
+	folio_set_error(folio);
+	BUG_ON(folio_test_writeback(folio));
+	folio_start_writeback(folio);
+	folio_unlock(folio);
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async_write(bh)) {
@@ -2728,9 +2723,10 @@ static int reiserfs_read_folio(struct file *f, struct folio *folio)
 
 static int reiserfs_writepage(struct page *page, struct writeback_control *wbc)
 {
-	struct inode *inode = page->mapping->host;
+	struct folio *folio = page_folio(page);
+	struct inode *inode = folio->mapping->host;
 	reiserfs_wait_on_write_block(inode->i_sb);
-	return reiserfs_write_full_page(page, wbc);
+	return reiserfs_write_full_folio(folio, wbc);
 }
 
 static void reiserfs_truncate_failed_write(struct inode *inode)
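
For reference, the writeback hunks above follow the buffer_head-on-folio pattern: look up (or create) the folio's buffers, then drive writeback state on the folio rather than on the page. A rough sketch of that shape, under stated assumptions (hypothetical helper, not reiserfs code; folio and buffer_head APIs as of v6.7, where create_empty_buffers() takes a folio and returns the head):

#include <linux/buffer_head.h>
#include <linux/pagemap.h>

/* Hypothetical sketch of the folio writeback shape used above. */
static int example_write_folio(struct folio *folio, unsigned long blocksize)
{
        struct buffer_head *head = folio_buffers(folio);

        /* v6.7+: create_empty_buffers() takes a folio and returns the head */
        if (!head)
                head = create_empty_buffers(folio, blocksize,
                                            (1 << BH_Dirty) | (1 << BH_Uptodate));

        folio_start_writeback(folio);   /* the folio must be locked here */
        folio_unlock(folio);
        /* ... walk head->b_this_page and submit the dirty buffers ... */
        folio_end_writeback(folio);     /* normally done from I/O completion */
        return 0;
}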