Diffstat (limited to 'mm/page-writeback.c')
-rw-r--r--    mm/page-writeback.c    45
1 file changed, 21 insertions(+), 24 deletions(-)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 4656534b8f..f735d28de3 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -692,7 +692,6 @@ static int __bdi_set_min_ratio(struct backing_dev_info *bdi, unsigned int min_ra
         if (min_ratio > 100 * BDI_RATIO_SCALE)
                 return -EINVAL;
-        min_ratio *= BDI_RATIO_SCALE;
         spin_lock_bh(&bdi_lock);
         if (min_ratio > bdi->max_ratio) {
@@ -729,7 +728,8 @@ static int __bdi_set_max_ratio(struct backing_dev_info *bdi, unsigned int max_ra
                 ret = -EINVAL;
         } else {
                 bdi->max_ratio = max_ratio;
-                bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) / 100;
+                bdi->max_prop_frac = (FPROP_FRAC_BASE * max_ratio) /
+                        (100 * BDI_RATIO_SCALE);
         }
         spin_unlock_bh(&bdi_lock);
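The divisor change follows from __bdi_set_max_ratio() now receiving a max_ratio that is already multiplied by BDI_RATIO_SCALE, so dividing by 100 alone would inflate max_prop_frac by the scale factor. A quick worked check, assuming BDI_RATIO_SCALE is 10000 (its value in this ratio-scaling series; confirm against include/linux/backing-dev.h):

/* A fully scaled 100% ratio arrives as 100 * BDI_RATIO_SCALE == 1000000. */
max_prop_frac = (FPROP_FRAC_BASE * 1000000) / (100 * 10000); /* == FPROP_FRAC_BASE */

so a 100% limit still maps to the full FPROP_FRAC_BASE proportion, as before the scaling was introduced.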
@@ -1638,7 +1638,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
          */
         dtc->wb_thresh = __wb_calc_thresh(dtc);
         dtc->wb_bg_thresh = dtc->thresh ?
-                div_u64((u64)dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
+                div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
         /*
          * In order to avoid the stacked BDI deadlock we need
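For reference, the two division helpers differ in divisor width (both come from include/linux/math64.h, roughly as below), so the switch avoids truncating dtc->thresh if it ever exceeds 32 bits; that rationale is inferred here rather than stated in the diff.

u64 div_u64(u64 dividend, u32 divisor);   /* 32-bit divisor */
u64 div64_u64(u64 dividend, u64 divisor); /* full 64-bit divisor */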
@@ -1921,7 +1921,7 @@ pause:
                         break;
                 }
                 __set_current_state(TASK_KILLABLE);
-                wb->dirty_sleep = now;
+                bdi->last_bdp_sleep = jiffies;
                 io_schedule_timeout(pause);
                 current->dirty_paused_when = now + pause;
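This hunk moves the throttling timestamp from the per-writeback struct (wb->dirty_sleep = now) onto the backing_dev_info, stamped with jiffies, where code outside the writeback path can read it cheaply. A purely illustrative sketch of the kind of consumer this enables; the helper name and the one-HZ window are made up for illustration, not taken from this diff:

/* Illustrative only: did balance_dirty_pages() sleep on this bdi recently? */
static bool bdp_slept_recently(struct backing_dev_info *bdi)
{
        return time_before(jiffies, bdi->last_bdp_sleep + HZ);
}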
@@ -2679,7 +2679,7 @@ void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
  * @folio: Folio to be marked as dirty.
  *
  * Filesystems which do not use buffer heads should call this function
- * from their set_page_dirty address space operation. It ignores the
+ * from their dirty_folio address space operation. It ignores the
  * contents of folio_get_private(), so if the filesystem marks individual
  * blocks as dirty, the filesystem should handle that itself.
  *
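The comment being updated here is the kernel-doc for filemap_dirty_folio(). For a filesystem that does not use buffer heads, wiring it up is a single entry in its address_space_operations table. A minimal sketch, with the examplefs_* callbacks being hypothetical placeholders rather than anything from this patch:

/* Hypothetical filesystem using filemap_dirty_folio() as its dirty_folio op. */
static const struct address_space_operations examplefs_aops = {
        .read_folio     = examplefs_read_folio,    /* placeholder */
        .writepages     = examplefs_writepages,    /* placeholder */
        .dirty_folio    = filemap_dirty_folio,
};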
@@ -2953,19 +2953,16 @@ bool __folio_end_writeback(struct folio *folio)
                 unsigned long flags;
                 xa_lock_irqsave(&mapping->i_pages, flags);
-                ret = folio_test_clear_writeback(folio);
-                if (ret) {
-                        __xa_clear_mark(&mapping->i_pages, folio_index(folio),
-                                                PAGECACHE_TAG_WRITEBACK);
-                        if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
-                                struct bdi_writeback *wb = inode_to_wb(inode);
-
-                                wb_stat_mod(wb, WB_WRITEBACK, -nr);
-                                __wb_writeout_add(wb, nr);
-                                if (!mapping_tagged(mapping,
-                                                PAGECACHE_TAG_WRITEBACK))
-                                        wb_inode_writeback_end(wb);
-                        }
+                ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
+                __xa_clear_mark(&mapping->i_pages, folio_index(folio),
+                                PAGECACHE_TAG_WRITEBACK);
+                if (bdi->capabilities & BDI_CAP_WRITEBACK_ACCT) {
+                        struct bdi_writeback *wb = inode_to_wb(inode);
+
+                        wb_stat_mod(wb, WB_WRITEBACK, -nr);
+                        __wb_writeout_add(wb, nr);
+                        if (!mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK))
+                                wb_inode_writeback_end(wb);
                 }
                 if (mapping->host && !mapping_tagged(mapping,
@@ -2974,14 +2971,14 @@ bool __folio_end_writeback(struct folio *folio)
                 xa_unlock_irqrestore(&mapping->i_pages, flags);
         } else {
-                ret = folio_test_clear_writeback(folio);
-        }
-        if (ret) {
-                lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
-                zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
-                node_stat_mod_folio(folio, NR_WRITTEN, nr);
+                ret = folio_xor_flags_has_waiters(folio, 1 << PG_writeback);
         }
+
+        lruvec_stat_mod_folio(folio, NR_WRITEBACK, -nr);
+        zone_stat_mod_folio(folio, NR_ZONE_WRITE_PENDING, -nr);
+        node_stat_mod_folio(folio, NR_WRITTEN, nr);
         folio_memcg_unlock(folio);
+
         return ret;
 }
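With folio_test_clear_writeback() gone, clearing PG_writeback and checking for waiters collapse into the single atomic folio_xor_flags_has_waiters() call, and the value returned from __folio_end_writeback() now means "someone is waiting on this bit" rather than "writeback was set". A sketch of how the caller consumes that, modelled on folio_end_writeback() in mm/filemap.c in kernels where this change landed; surrounding details may differ in other trees:

/* Caller side: hold a reference across the clear, then wake any waiters. */
folio_get(folio);
if (__folio_end_writeback(folio))
        folio_wake_bit(folio, PG_writeback);
acct_reclaim_writeback(folio);
folio_put(folio);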