From 7998df0b6407da65a0fe8325d6ff239c4e14ff7d Mon Sep 17 00:00:00 2001
From: Joel Granados
Date: Thu, 28 Mar 2024 16:57:48 +0100
Subject: memory: remove the now superfluous sentinel element from ctl_table array

This commit comes at the tail end of a greater effort to remove the
empty elements at the end of the ctl_table arrays (sentinels), which
reduces the overall build-time size of the kernel and its run-time
memory bloat by ~64 bytes per sentinel (for further information, see
Link: https://lore.kernel.org/all/ZO5Yx5JFogGi%2FcBo@bombadil.infradead.org/).

Remove the sentinel from all files under mm/ that register a sysctl
table.

Link: https://lkml.kernel.org/r/20240328-jag-sysctl_remset_misc-v1-1-47c1463b3af2@samsung.com
Signed-off-by: Joel Granados
Reviewed-by: Muchun Song
Reviewed-by: Miaohe Lin
Signed-off-by: Andrew Morton
---
 mm/page-writeback.c | 1 -
 1 file changed, 1 deletion(-)

(limited to 'mm/page-writeback.c')

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3e19b87049db..fba324e1a010 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2291,7 +2291,6 @@ static struct ctl_table vm_page_writeback_sysctls[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_jiffies,
 	},
-	{}
 };
 #endif
-- cgit v1.2.3-59-g8ed1b

From 5a550a0c60706849d70aec7f211a7e51725adb1b Mon Sep 17 00:00:00 2001
From: David Howells
Date: Mon, 18 Mar 2024 16:18:49 +0000
Subject: mm: Export writeback_iter()

Export writeback_iter() so that it can be used by netfslib as a module.

Signed-off-by: David Howells
Reviewed-by: Jeff Layton
cc: Matthew Wilcox (Oracle)
cc: Christoph Hellwig
cc: linux-mm@kvack.org
---
 mm/page-writeback.c | 1 +
 1 file changed, 1 insertion(+)

(limited to 'mm/page-writeback.c')

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 3e19b87049db..06fc89d981e8 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2546,6 +2546,7 @@ done:
 	folio_batch_release(&wbc->fbatch);
 	return NULL;
 }
+EXPORT_SYMBOL_GPL(writeback_iter);

 /**
  * write_cache_pages - walk the list of dirty pages of the given address space and write all of them.
-- cgit v1.2.3-59-g8ed1b

From 3d84d897920c75fc3aeeac452e8e8a4073398ce7 Mon Sep 17 00:00:00 2001
From: "Matthew Wilcox (Oracle)"
Date: Tue, 16 Apr 2024 04:17:45 +0100
Subject: doc: improve the description of __folio_mark_dirty

Patch series "Improve buffer head documentation", v3.

Turn the buffer head documentation into its own document, and make many
general improvements to the docs.  Obviously there is much more that
could be done.  Tested with make htmldocs.

This patch (of 8):

I've learned why it's safe to call __folio_mark_dirty() from
mark_buffer_dirty() without holding the folio lock, so update the
description to explain why.

Link: https://lkml.kernel.org/r/20240416031754.4076917-1-willy@infradead.org
Link: https://lkml.kernel.org/r/20240416031754.4076917-2-willy@infradead.org
Signed-off-by: Matthew Wilcox (Oracle)
Cc: Pankaj Raghav
Signed-off-by: Andrew Morton
---
 mm/page-writeback.c | 14 +++++++++-----
 1 file changed, 9 insertions(+), 5 deletions(-)

(limited to 'mm/page-writeback.c')

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index fba324e1a010..2c1f595d1ddc 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2705,11 +2705,15 @@ void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
  * If warn is true, then emit a warning if the folio is not uptodate and has
  * not been truncated.
  *
- * The caller must hold folio_memcg_lock(). Most callers have the folio
- * locked. A few have the folio blocked from truncation through other
- * means (eg zap_vma_pages() has it mapped and is holding the page table
- * lock). This can also be called from mark_buffer_dirty(), which I
- * cannot prove is always protected against truncate.
+ * The caller must hold folio_memcg_lock(). It is the caller's
+ * responsibility to prevent the folio from being truncated while
+ * this function is in progress, although it may have been truncated
+ * before this function is called. Most callers have the folio locked.
+ * A few have the folio blocked from truncation through other means (e.g.
+ * zap_vma_pages() has it mapped and is holding the page table lock).
+ * When called from mark_buffer_dirty(), the filesystem should hold a
+ * reference to the buffer_head that is being marked dirty, which causes
+ * try_to_free_buffers() to fail.
  */
 void __folio_mark_dirty(struct folio *folio, struct address_space *mapping,
 			     int warn)
-- cgit v1.2.3-59-g8ed1b

From 4b5bbc39d7a637a135fd8b3ef86e60a6a7301e9d Mon Sep 17 00:00:00 2001
From: Kemeng Shi
Date: Tue, 23 Apr 2024 11:46:40 +0800
Subject: writeback: support retrieving per group debug writeback stats of bdi

Add /sys/kernel/debug/bdi/xxx/wb_stats to show the per-group writeback
stats of a bdi.

The following domain hierarchy is tested:

		global domain (320G)
		/		      \
	cgroup domain1 (10G)	cgroup domain2 (10G)
		|			|
bdi	       wb1		       wb2

/* per-wb writeback info of bdi is collected */
cat wb_stats
WbCgIno:                    1
WbWriteback:                0 kB
WbReclaimable:              0 kB
WbDirtyThresh:              0 kB
WbDirtied:                  0 kB
WbWritten:                  0 kB
WbWriteBandwidth:      102400 kBps
b_dirty:                    0
b_io:                       0
b_more_io:                  0
b_dirty_time:               0
state:                      1

WbCgIno:                 4091
WbWriteback:             1792 kB
WbReclaimable:         820512 kB
WbDirtyThresh:        6004692 kB
WbDirtied:            1820448 kB
WbWritten:             999488 kB
WbWriteBandwidth:      169020 kBps
b_dirty:                    0
b_io:                       0
b_more_io:                  1
b_dirty_time:               0
state:                      5

WbCgIno:                 4131
WbWriteback:             1120 kB
WbReclaimable:         820064 kB
WbDirtyThresh:        6004728 kB
WbDirtied:            1822688 kB
WbWritten:            1002400 kB
WbWriteBandwidth:      153520 kBps
b_dirty:                    0
b_io:                       0
b_more_io:                  1
b_dirty_time:               0
state:                      5

[shikemeng@huaweicloud.com: fix build problems]
Link: https://lkml.kernel.org/r/20240423034643.141219-4-shikemeng@huaweicloud.com
Link: https://lkml.kernel.org/r/20240423034643.141219-3-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi
Cc: Brian Foster
Cc: David Howells
Cc: David Sterba
Cc: Jan Kara
Cc: Mateusz Guzik
Cc: Matthew Wilcox (Oracle)
Cc: SeongJae Park
Cc: Stephen Rothwell
Cc: Tejun Heo
Signed-off-by: Andrew Morton
---
 include/linux/writeback.h |  1 +
 mm/backing-dev.c          | 81 +++++++++++++++++++++++++++++++++++++++++++++--
 mm/page-writeback.c       | 19 +++++++++++
 3 files changed, 99 insertions(+), 2 deletions(-)

(limited to 'mm/page-writeback.c')

diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 9845cb62e40b..112d806ddbe4 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -355,6 +355,7 @@ int dirtytime_interval_handler(struct ctl_table *table, int write,
 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty);
 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh);
+unsigned long cgwb_calc_thresh(struct bdi_writeback *wb);
 void wb_update_bandwidth(struct bdi_writeback *wb);
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 089146feb830..e61bbb1bd622 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -155,19 +155,96 @@ static int bdi_debug_stats_show(struct seq_file *m, void *v)
 }
 DEFINE_SHOW_ATTRIBUTE(bdi_debug_stats);

+static void wb_stats_show(struct seq_file *m, struct bdi_writeback *wb,
+			  struct wb_stats *stats)
+{
+	seq_printf(m,
+		   "WbCgIno:           %10lu\n"
+		   "WbWriteback:       %10lu kB\n"
+		   "WbReclaimable:     %10lu kB\n"
+		   "WbDirtyThresh:     %10lu kB\n"
+		   "WbDirtied:         %10lu kB\n"
+		   "WbWritten:         %10lu kB\n"
+		   "WbWriteBandwidth:  %10lu kBps\n"
+		   "b_dirty:           %10lu\n"
+		   "b_io:              %10lu\n"
+		   "b_more_io:         %10lu\n"
+		   "b_dirty_time:      %10lu\n"
+		   "state:             %10lx\n\n",
+#ifdef CONFIG_CGROUP_WRITEBACK
+		   cgroup_ino(wb->memcg_css->cgroup),
+#else
+		   1ul,
+#endif
+		   K(stats->nr_writeback),
+		   K(stats->nr_reclaimable),
+		   K(stats->wb_thresh),
+		   K(stats->nr_dirtied),
+		   K(stats->nr_written),
+		   K(wb->avg_write_bandwidth),
+		   stats->nr_dirty,
+		   stats->nr_io,
+		   stats->nr_more_io,
+		   stats->nr_dirty_time,
+		   wb->state);
+}
+
+static int cgwb_debug_stats_show(struct seq_file *m, void *v)
+{
+	struct backing_dev_info *bdi = m->private;
+	unsigned long background_thresh;
+	unsigned long dirty_thresh;
+	struct bdi_writeback *wb;
+
+	global_dirty_limits(&background_thresh, &dirty_thresh);
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(wb, &bdi->wb_list, bdi_node) {
+		struct wb_stats stats = { .dirty_thresh = dirty_thresh };
+
+		if (!wb_tryget(wb))
+			continue;
+
+		collect_wb_stats(&stats, wb);
+
+		/*
+		 * Calculate thresh of wb in writeback cgroup which is min of
+		 * thresh in global domain and thresh in cgroup domain. Drop
+		 * rcu lock because cgwb_calc_thresh may sleep in
+		 * cgroup_rstat_flush. We can do so here because we have a ref.
+		 */
+		if (mem_cgroup_wb_domain(wb)) {
+			rcu_read_unlock();
+			stats.wb_thresh = min(stats.wb_thresh, cgwb_calc_thresh(wb));
+			rcu_read_lock();
+		}
+
+		wb_stats_show(m, wb, &stats);
+
+		wb_put(wb);
+	}
+	rcu_read_unlock();
+
+	return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(cgwb_debug_stats);
+
 static void bdi_debug_register(struct backing_dev_info *bdi, const char *name)
 {
 	bdi->debug_dir = debugfs_create_dir(name, bdi_debug_root);

 	debugfs_create_file("stats", 0444, bdi->debug_dir, bdi,
 			    &bdi_debug_stats_fops);
+	debugfs_create_file("wb_stats", 0444, bdi->debug_dir, bdi,
+			    &cgwb_debug_stats_fops);
 }

 static void bdi_debug_unregister(struct backing_dev_info *bdi)
 {
 	debugfs_remove_recursive(bdi->debug_dir);
 }
-#else
+#else	/* CONFIG_DEBUG_FS */
 static inline void bdi_debug_init(void)
 {
 }
@@ -178,7 +255,7 @@ static inline void bdi_debug_register(struct backing_dev_info *bdi,
 static inline void bdi_debug_unregister(struct backing_dev_info *bdi)
 {
 }
-#endif
+#endif	/* CONFIG_DEBUG_FS */

 static ssize_t read_ahead_kb_store(struct device *dev,
 				   struct device_attribute *attr,
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2c1f595d1ddc..c5fb3121469a 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -892,6 +892,25 @@ unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
 	return __wb_calc_thresh(&gdtc);
 }

+unsigned long cgwb_calc_thresh(struct bdi_writeback *wb)
+{
+	struct dirty_throttle_control gdtc = { GDTC_INIT_NO_WB };
+	struct dirty_throttle_control mdtc = { MDTC_INIT(wb, &gdtc) };
+	unsigned long filepages = 0, headroom = 0, writeback = 0;
+
+	gdtc.avail = global_dirtyable_memory();
+	gdtc.dirty = global_node_page_state(NR_FILE_DIRTY) +
+		     global_node_page_state(NR_WRITEBACK);
+
+	mem_cgroup_wb_stats(wb, &filepages, &headroom,
+			    &mdtc.dirty, &writeback);
+	mdtc.dirty += writeback;
+	mdtc_calc_avail(&mdtc, filepages, headroom);
+	domain_dirty_limits(&mdtc);
+
+	return __wb_calc_thresh(&mdtc);
+}
+
 /*
  *                           setpoint - dirty 3
  *        f(dirty) := 1.0 + (----------------)
-- cgit v1.2.3-59-g8ed1b

From 826881a7f66557143df5c7f9f401509aa1a7fa5c Mon Sep 17 00:00:00 2001
From: Kemeng Shi
Date: Tue, 23 Apr 2024 11:46:43 +0800
Subject: writeback: rename nr_reclaimable to nr_dirty in balance_dirty_pages

Commit 8d92890bd6b85 ("mm/writeback: discard NR_UNSTABLE_NFS, use
NR_WRITEBACK instead") removed NR_UNSTABLE_NFS, so nr_reclaimable now
contains only dirty pages.  Rename nr_reclaimable to nr_dirty
accordingly.

Link: https://lkml.kernel.org/r/20240423034643.141219-6-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi
Reviewed-by: Jan Kara
Cc: Brian Foster
Cc: David Howells
Cc: David Sterba
Cc: Mateusz Guzik
Cc: Matthew Wilcox (Oracle)
Cc: SeongJae Park
Cc: Stephen Rothwell
Cc: Tejun Heo
Signed-off-by: Andrew Morton
---
 mm/page-writeback.c | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'mm/page-writeback.c')

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index c5fb3121469a..db5769cb12fd 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -1694,7 +1694,7 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
 	struct dirty_throttle_control * const mdtc = mdtc_valid(&mdtc_stor) ?
 						     &mdtc_stor : NULL;
 	struct dirty_throttle_control *sdtc;
-	unsigned long nr_reclaimable;	/* = file_dirty */
+	unsigned long nr_dirty;
 	long period;
 	long pause;
 	long max_pause;
@@ -1715,9 +1715,9 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
 		unsigned long m_thresh = 0;
 		unsigned long m_bg_thresh = 0;

-		nr_reclaimable = global_node_page_state(NR_FILE_DIRTY);
+		nr_dirty = global_node_page_state(NR_FILE_DIRTY);
 		gdtc->avail = global_dirtyable_memory();
-		gdtc->dirty = nr_reclaimable + global_node_page_state(NR_WRITEBACK);
+		gdtc->dirty = nr_dirty + global_node_page_state(NR_WRITEBACK);

 		domain_dirty_limits(gdtc);

@@ -1768,7 +1768,7 @@ static int balance_dirty_pages(struct bdi_writeback *wb,
 		 * In normal mode, we start background writeout at the lower
 		 * background_thresh, to keep the amount of dirty memory low.
 		 */
-		if (!laptop_mode && nr_reclaimable > gdtc->bg_thresh &&
+		if (!laptop_mode && nr_dirty > gdtc->bg_thresh &&
 		    !writeback_in_progress(wb))
 			wb_start_background_writeback(wb);
-- cgit v1.2.3-59-g8ed1b

From 13fc441284b39ae8fc394547c032c2dad2268406 Mon Sep 17 00:00:00 2001
From: Kemeng Shi
Date: Thu, 25 Apr 2024 21:17:21 +0800
Subject: mm: enable __wb_calc_thresh to calculate dirty background threshold

Patch series "Fix and cleanups to page-writeback", v2.

This series contains some random cleanups and a fix to correct the
calculation of a wb's bg_thresh in the cgroup domain.  More details can
be found in the respective patches.

This patch (of 4):

Originally, __wb_calc_thresh always calculated the wb's share of the
dirty throttling threshold.  By taking the wb_domain threshold from the
caller, __wb_calc_thresh can be used for both the dirty throttling and
the dirty background threshold.

This is a preparation to correct the threshold calculation of a wb in
the cgroup domain (see the illustrative note below).
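
[editorial note: the following sketch is illustrative only and is not part
of this patch; the names are taken from the diff below.  The refactor turns
the implicit read of dtc->thresh into an explicit parameter, so one helper
can apportion either domain limit by the wb's completion share:

	/* before: the domain threshold is implied by the control struct */
	wb_thresh = __wb_calc_thresh(&gdtc);			/* uses gdtc.thresh */

	/* after: the caller chooses which domain threshold to apportion */
	wb_thresh    = __wb_calc_thresh(&gdtc, gdtc.thresh);	/* throttling */
	wb_bg_thresh = __wb_calc_thresh(&gdtc, gdtc.bg_thresh);	/* background */
]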
Link: https://lkml.kernel.org/r/20240425131724.36778-1-shikemeng@huaweicloud.com
Link: https://lkml.kernel.org/r/20240425131724.36778-2-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi
Reviewed-by: Jan Kara
Cc: Howard Cochran
Cc: Jens Axboe
Cc: Matthew Wilcox (Oracle)
Cc: Miklos Szeredi
Cc: Tejun Heo
Signed-off-by: Andrew Morton
---
 mm/page-writeback.c | 33 ++++++++++++++++++---------------
 1 file changed, 18 insertions(+), 15 deletions(-)

(limited to 'mm/page-writeback.c')

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index db5769cb12fd..2a3b68aae336 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -838,13 +838,15 @@ static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
 }

 /**
- * __wb_calc_thresh - @wb's share of dirty throttling threshold
+ * __wb_calc_thresh - @wb's share of dirty threshold
  * @dtc: dirty_throttle_context of interest
+ * @thresh: dirty throttling or dirty background threshold of wb_domain in @dtc
  *
- * Note that balance_dirty_pages() will only seriously take it as a hard limit
- * when sleeping max_pause per page is not enough to keep the dirty pages under
- * control. For example, when the device is completely stalled due to some error
- * conditions, or when there are 1000 dd tasks writing to a slow 10MB/s USB key.
+ * Note that balance_dirty_pages() will only seriously take dirty throttling
+ * threshold as a hard limit when sleeping max_pause per page is not enough
+ * to keep the dirty pages under control. For example, when the device is
+ * completely stalled due to some error conditions, or when there are 1000
+ * dd tasks writing to a slow 10MB/s USB key.
 * In the other normal situations, it acts more gently by throttling the tasks
 * more (rather than completely block them) when the wb dirty pages go high.
 *
@@ -855,19 +857,20 @@ static void mdtc_calc_avail(struct dirty_throttle_control *mdtc,
 * The wb's share of dirty limit will be adapting to its throughput and
 * bounded by the bdi->min_ratio and/or bdi->max_ratio parameters, if set.
 *
- * Return: @wb's dirty limit in pages. The term "dirty" in the context of
- * dirty balancing includes all PG_dirty and PG_writeback pages.
+ * Return: @wb's dirty limit in pages. For dirty throttling limit, the term
+ * "dirty" in the context of dirty balancing includes all PG_dirty and
+ * PG_writeback pages.
 */
-static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)
+static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc,
+				      unsigned long thresh)
 {
 	struct wb_domain *dom = dtc_dom(dtc);
-	unsigned long thresh = dtc->thresh;
 	u64 wb_thresh;
 	unsigned long numerator, denominator;
 	unsigned long wb_min_ratio, wb_max_ratio;

 	/*
-	 * Calculate this BDI's share of the thresh ratio.
+	 * Calculate this wb's share of the thresh ratio.
	 */
 	fprop_fraction_percpu(&dom->completions, dtc->wb_completions,
 			      &numerator, &denominator);
@@ -887,9 +890,9 @@ static unsigned long __wb_calc_thresh(struct dirty_throttle_control *dtc)

 unsigned long wb_calc_thresh(struct bdi_writeback *wb, unsigned long thresh)
 {
-	struct dirty_throttle_control gdtc = { GDTC_INIT(wb),
-					       .thresh = thresh };
-	return __wb_calc_thresh(&gdtc);
+	struct dirty_throttle_control gdtc = { GDTC_INIT(wb) };
+
+	return __wb_calc_thresh(&gdtc, thresh);
 }

 unsigned long cgwb_calc_thresh(struct bdi_writeback *wb)
@@ -908,7 +911,7 @@ unsigned long cgwb_calc_thresh(struct bdi_writeback *wb)
 	mdtc_calc_avail(&mdtc, filepages, headroom);
 	domain_dirty_limits(&mdtc);

-	return __wb_calc_thresh(&mdtc);
+	return __wb_calc_thresh(&mdtc, mdtc.thresh);
 }

 /*
@@ -1655,7 +1658,7 @@ static inline void wb_dirty_limits(struct dirty_throttle_control *dtc)
 	 * wb_position_ratio() will let the dirtier task progress
 	 * at some rate <= (write_bw / 2) for bringing down wb_dirty.
 	 */
-	dtc->wb_thresh = __wb_calc_thresh(dtc);
+	dtc->wb_thresh = __wb_calc_thresh(dtc, dtc->thresh);
 	dtc->wb_bg_thresh = dtc->thresh ?
 		div64_u64(dtc->wb_thresh * dtc->bg_thresh, dtc->thresh) : 0;
-- cgit v1.2.3-59-g8ed1b

From fabd2e42bc71d78d1e9dcd5af453d1ab86cc2a7e Mon Sep 17 00:00:00 2001
From: Kemeng Shi
Date: Thu, 25 Apr 2024 21:17:22 +0800
Subject: mm: correct calculation of wb's bg_thresh in cgroup domain

wb_calc_thresh() is calculating the wb's share of bg_thresh in the
global domain.  However, in the case of cgroup writeback this is not
the right thing to do.  Consider the following domain hierarchy:

		global domain (> 20G)
		/		      \
	cgroup1 (10G)		cgroup2 (10G)
		|			|
bdi	       wb1		       wb2

and assume wb1 and wb2 have the same bandwidth and the background
threshold is set at 10%.  The bg_thresh of cgroup1 and cgroup2 is going
to be 1G.  Now, because wb_calc_thresh(mdtc->wb, mdtc->bg_thresh)
calculates the per-wb threshold in the global domain as
(wb bandwidth) / (domain bandwidth), it returns bg_thresh for wb1 as
0.5G although wb1 has nobody to compete against in cgroup1.

Fix the problem by calculating the wb's share of bg_thresh in the
cgroup domain.

Test as follows:

/* make it easier to observe the issue */
echo 300000 > /proc/sys/vm/dirty_expire_centisecs
echo 100 > /proc/sys/vm/dirty_writeback_centisecs

/* run fio in wb1 */
cd /sys/fs/cgroup
echo "+memory +io" > cgroup.subtree_control
mkdir group1
cd group1
echo 10G > memory.high
echo 10G > memory.max
echo $$ > cgroup.procs
mkfs.ext4 -F /dev/vdb
mount /dev/vdb /bdi1/
fio -name test -filename=/bdi1/file -size=600M -ioengine=libaio -bs=4K \
    -iodepth=1 -rw=write -direct=0 --time_based -runtime=600 -invalidate=0

/* run fio in wb2 with a new shell */
cd /sys/fs/cgroup
mkdir group2
cd group2
echo 10G > memory.high
echo 10G > memory.max
echo $$ > cgroup.procs
mkfs.ext4 -F /dev/vdc
mount /dev/vdc /bdi2/
fio -name test -filename=/bdi2/file -size=600M -ioengine=libaio -bs=4K \
    -iodepth=1 -rw=write -direct=0 --time_based -runtime=600 -invalidate=0

Before the fix, the written pages of wb1 and wb2 reported by
tools/writeback/wb_monitor.py keep growing.  After the fix, written
pages only rarely accumulate.  There is no obvious change in the fio
results.
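
[editorial note: a worked version of the arithmetic above, assuming the
setup described in this changelog (two wbs of equal bandwidth, background
ratio 10%); it is not part of the original message:

	cgroup1 bg_thresh                = 10% of 10G  = 1G
	old: wb_calc_thresh(wb1, 1G)     scales 1G by wb1's share of the
	     global domain bandwidth     = 1G * (1/2)  = 0.5G
	new: __wb_calc_thresh(mdtc, 1G)  scales 1G by wb1's share of the
	     cgroup1 domain bandwidth    = 1G * (1/1)  = 1G

With the halved threshold, wb_over_bg_thresh() keeps returning true for
wb1, which matches the ever-growing written-page counts reported above.]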
[jack@suse.cz: changelog rewording]
Link: https://lkml.kernel.org/r/20240425131724.36778-3-shikemeng@huaweicloud.com
Fixes: 74d369443325 ("writeback: Fix performance regression in wb_over_bg_thresh()")
Signed-off-by: Kemeng Shi
Reviewed-by: Jan Kara
Cc: Howard Cochran
Cc: Jens Axboe
Cc: Matthew Wilcox (Oracle)
Cc: Miklos Szeredi
Cc: Tejun Heo
Signed-off-by: Andrew Morton
---
 mm/page-writeback.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/page-writeback.c')

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 2a3b68aae336..14893b20d38c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2137,7 +2137,7 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 		if (mdtc->dirty > mdtc->bg_thresh)
 			return true;

-		thresh = wb_calc_thresh(mdtc->wb, mdtc->bg_thresh);
+		thresh = __wb_calc_thresh(mdtc, mdtc->bg_thresh);
 		if (thresh < 2 * wb_stat_error())
 			reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
 		else
-- cgit v1.2.3-59-g8ed1b

From 3b3e412e5f4800f3423f7eea6713254d990fe22d Mon Sep 17 00:00:00 2001
From: Kemeng Shi
Date: Thu, 25 Apr 2024 21:17:23 +0800
Subject: mm: call __wb_calc_thresh instead of wb_calc_thresh in wb_over_bg_thresh

Call __wb_calc_thresh to calculate the wb bg_thresh of gdtc in
wb_over_bg_thresh, avoiding the unnecessary detour through the
wb_calc_thresh wrapper.

Link: https://lkml.kernel.org/r/20240425131724.36778-4-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi
Reviewed-by: Jan Kara
Cc: Howard Cochran
Cc: Jens Axboe
Cc: Matthew Wilcox (Oracle)
Cc: Miklos Szeredi
Cc: Tejun Heo
Signed-off-by: Andrew Morton
---
 mm/page-writeback.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'mm/page-writeback.c')

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 14893b20d38c..22e1acec899e 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2117,7 +2117,7 @@ bool wb_over_bg_thresh(struct bdi_writeback *wb)
 	if (gdtc->dirty > gdtc->bg_thresh)
 		return true;

-	thresh = wb_calc_thresh(gdtc->wb, gdtc->bg_thresh);
+	thresh = __wb_calc_thresh(gdtc, gdtc->bg_thresh);
 	if (thresh < 2 * wb_stat_error())
 		reclaimable = wb_stat_sum(wb, WB_RECLAIMABLE);
 	else
-- cgit v1.2.3-59-g8ed1b

From dc6e0ae5b1700c54a9c34daf3913adb40b6ddbad Mon Sep 17 00:00:00 2001
From: Kemeng Shi
Date: Thu, 25 Apr 2024 21:17:24 +0800
Subject: mm: remove stale comment in __folio_mark_dirty

__folio_mark_dirty() no longer marks the inode dirty.  Remove the stale
part of its comment.

Link: https://lkml.kernel.org/r/20240425131724.36778-5-shikemeng@huaweicloud.com
Signed-off-by: Kemeng Shi
Reviewed-by: Matthew Wilcox (Oracle)
Reviewed-by: Jan Kara
Cc: Howard Cochran
Cc: Jens Axboe
Cc: Miklos Szeredi
Cc: Tejun Heo
Signed-off-by: Andrew Morton
---
 mm/page-writeback.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'mm/page-writeback.c')

diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 22e1acec899e..692c0da04cbd 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2721,8 +2721,7 @@ void folio_account_cleaned(struct folio *folio, struct bdi_writeback *wb)
 }

 /*
- * Mark the folio dirty, and set it dirty in the page cache, and mark
- * the inode dirty.
+ * Mark the folio dirty, and set it dirty in the page cache.
 *
 * If warn is true, then emit a warning if the folio is not uptodate and has
 * not been truncated.
-- cgit v1.2.3-59-g8ed1b
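
[editorial note: for reference, a minimal sketch of the loop shape that the
writeback_iter() export (second commit above) makes available to a module
such as netfslib.  my_write_folio() is a hypothetical helper that writes
and unlocks the folio; error handling is elided, and this code is not taken
from any of the patches:

	static int my_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
	{
		struct folio *folio = NULL;
		int error = 0;

		/*
		 * writeback_iter() hands back each dirty folio in turn,
		 * locked; passing the previous folio back in advances the
		 * walk, and the loop ends when the iterator returns NULL.
		 */
		while ((folio = writeback_iter(mapping, wbc, folio, &error)))
			error = my_write_folio(folio);	/* hypothetical */

		return error;
	}
]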