Diffstat (limited to 'include/linux/swap.h'):
 -rw-r--r--  include/linux/swap.h  |  362
 1 file changed, 194 insertions(+), 168 deletions(-)
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 1e99f7ac1d7e..a18cf4b7c724 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -10,8 +10,10 @@
#include <linux/sched.h>
#include <linux/node.h>
#include <linux/fs.h>
+#include <linux/pagemap.h>
#include <linux/atomic.h>
#include <linux/page-flags.h>
+#include <uapi/linux/mempolicy.h>
#include <asm/page.h>
struct notifier_block;
@@ -53,30 +55,59 @@ static inline int current_is_kswapd(void)
* actions on faults.
*/
+#define SWP_SWAPIN_ERROR_NUM 1
+#define SWP_SWAPIN_ERROR (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
+ SWP_MIGRATION_NUM + SWP_DEVICE_NUM + \
+ SWP_PTE_MARKER_NUM)
+/*
+ * PTE markers are used to persist information onto PTEs that are mapped with
+ * file-backed memory. As the name "PTE" hints, they should only be applied to
+ * the leaves of pgtables.
+ */
+#ifdef CONFIG_PTE_MARKER
+#define SWP_PTE_MARKER_NUM 1
+#define SWP_PTE_MARKER (MAX_SWAPFILES + SWP_HWPOISON_NUM + \
+ SWP_MIGRATION_NUM + SWP_DEVICE_NUM)
+#else
+#define SWP_PTE_MARKER_NUM 0
+#endif
+
/*
* Unaddressable device memory support. See include/linux/hmm.h and
- * Documentation/vm/hmm.rst. Short description is we need struct pages for
+ * Documentation/mm/hmm.rst. Short description is we need struct pages for
* device memory that is unaddressable (inaccessible) by CPU, so that we can
* migrate part of a process memory to device memory.
*
* When a page is migrated from CPU to device, we set the CPU page table entry
- * to a special SWP_DEVICE_* entry.
+ * to a special SWP_DEVICE_{READ|WRITE} entry.
+ *
+ * When a page is mapped by the device for exclusive access we set the CPU page
+ * table entries to special SWP_DEVICE_EXCLUSIVE_* entries.
*/
#ifdef CONFIG_DEVICE_PRIVATE
-#define SWP_DEVICE_NUM 2
+#define SWP_DEVICE_NUM 4
#define SWP_DEVICE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM)
#define SWP_DEVICE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+1)
+#define SWP_DEVICE_EXCLUSIVE_WRITE (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+2)
+#define SWP_DEVICE_EXCLUSIVE_READ (MAX_SWAPFILES+SWP_HWPOISON_NUM+SWP_MIGRATION_NUM+3)
#else
#define SWP_DEVICE_NUM 0
#endif
/*
- * NUMA node memory migration support
+ * Page migration support.
+ *
+ * SWP_MIGRATION_READ_EXCLUSIVE is only applicable to anonymous pages and
+ * indicates that the referenced (part of an) anonymous page is exclusive to
+ * a single process. For SWP_MIGRATION_WRITE, that information is implicit:
+ * (parts of) anonymous pages that are mapped writable are exclusive to a
+ * single process.
*/
#ifdef CONFIG_MIGRATION
-#define SWP_MIGRATION_NUM 2
-#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
-#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
+#define SWP_MIGRATION_NUM 3
+#define SWP_MIGRATION_READ (MAX_SWAPFILES + SWP_HWPOISON_NUM)
+#define SWP_MIGRATION_READ_EXCLUSIVE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 1)
+#define SWP_MIGRATION_WRITE (MAX_SWAPFILES + SWP_HWPOISON_NUM + 2)
#else
#define SWP_MIGRATION_NUM 0
#endif
@@ -93,7 +124,8 @@ static inline int current_is_kswapd(void)
#define MAX_SWAPFILES \
((1 << MAX_SWAPFILES_SHIFT) - SWP_DEVICE_NUM - \
- SWP_MIGRATION_NUM - SWP_HWPOISON_NUM)
+ SWP_MIGRATION_NUM - SWP_HWPOISON_NUM - \
+ SWP_PTE_MARKER_NUM - SWP_SWAPIN_ERROR_NUM)
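
For illustration, with all of the above carved out, the swap type space ends
up laid out like this (a sketch assuming MAX_SWAPFILES_SHIFT == 5 with
CONFIG_MEMORY_FAILURE, CONFIG_MIGRATION, CONFIG_DEVICE_PRIVATE and
CONFIG_PTE_MARKER all enabled; the exact values depend on the config):

	/*
	 *  type  0..21   real swap files	(MAX_SWAPFILES == 32-4-3-1-1-1 == 22)
	 *  type  22      SWP_HWPOISON
	 *  type  23..25  SWP_MIGRATION_{READ,READ_EXCLUSIVE,WRITE}
	 *  type  26..29  SWP_DEVICE_{WRITE,READ,EXCLUSIVE_WRITE,EXCLUSIVE_READ}
	 *  type  30      SWP_PTE_MARKER
	 *  type  31      SWP_SWAPIN_ERROR
	 */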
/*
* Magic header for a swap area. The first part of the union is
@@ -130,6 +162,10 @@ union swap_header {
*/
struct reclaim_state {
unsigned long reclaimed_slab;
+#ifdef CONFIG_LRU_GEN
+ /* per-thread mm walk data */
+ struct lru_gen_mm_walk *mm_walk;
+#endif
};
#ifdef __KERNEL__
@@ -141,8 +177,8 @@ struct zone;
/*
* A swap extent maps a range of a swapfile's PAGE_SIZE pages onto a range of
- * disk blocks. A list of swap extents maps the entire swapfile. (Where the
- * term `swapfile' refers to either a blockdevice or an IS_REG file. Apart
+ * disk blocks. An rbtree of swap extents maps the entire swapfile (where the
+ * term `swapfile' refers to either a blockdevice or an IS_REG file). Apart
* from setup, they're handled identically.
*
* We always assume that blocks are of size PAGE_SIZE.
@@ -170,12 +206,11 @@ enum {
SWP_CONTINUED = (1 << 5), /* swap_map has count continuation */
SWP_BLKDEV = (1 << 6), /* it's a block device */
SWP_ACTIVATED = (1 << 7), /* set after swap_activate success */
- SWP_FS = (1 << 8), /* swap file goes through fs */
+ SWP_FS_OPS = (1 << 8), /* swapfile operations go through fs */
SWP_AREA_DISCARD = (1 << 9), /* single-time swap area discards */
SWP_PAGE_DISCARD = (1 << 10), /* freed swap page-cluster discards */
SWP_STABLE_WRITES = (1 << 11), /* no overwrite PG_writeback pages */
SWP_SYNCHRONOUS_IO = (1 << 12), /* synchronous IO is efficient */
- SWP_VALID = (1 << 13), /* swap is valid to be operated on? */
/* add others here before... */
SWP_SCANNING = (1 << 14), /* refcount in scan_swap_map */
};
@@ -183,12 +218,17 @@ enum {
#define SWAP_CLUSTER_MAX 32UL
#define COMPACT_CLUSTER_MAX SWAP_CLUSTER_MAX
-#define SWAP_MAP_MAX 0x3e /* Max duplication count, in first swap_map */
-#define SWAP_MAP_BAD 0x3f /* Note pageblock is bad, in first swap_map */
+/* Bit flag in swap_map */
#define SWAP_HAS_CACHE 0x40 /* Flag page is cached, in first swap_map */
-#define SWAP_CONT_MAX 0x7f /* Max count, in each swap_map continuation */
-#define COUNT_CONTINUED 0x80 /* See swap_map continuation for full count */
-#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs, in first swap_map */
+#define COUNT_CONTINUED 0x80 /* Flag swap_map continuation for full count */
+
+/* Special value in first swap_map */
+#define SWAP_MAP_MAX 0x3e /* Max count */
+#define SWAP_MAP_BAD 0x3f /* Note page is bad */
+#define SWAP_MAP_SHMEM 0xbf /* Owned by shmem/tmpfs */
+
+/* Special value in each swap_map continuation */
+#define SWAP_CONT_MAX 0x7f /* Max count */
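
As a worked example of the encoding above (an illustrative sketch derived
from these flags, not part of the patch): the low six bits of a first-level
swap_map byte hold the reference count, the two high bits are flags:

	/*
	 *  0x01 -> count 1: one swap reference, not in swap cache
	 *  0x42 -> SWAP_HAS_CACHE | 0x02: two references, page in swap cache
	 *  0x3f -> SWAP_MAP_BAD: bad slot, never allocate
	 *  0xbe -> COUNT_CONTINUED | SWAP_MAP_MAX: count overflows into a
	 *          continuation page (see add_swap_count_continuation())
	 *  0xbf -> COUNT_CONTINUED | SWAP_MAP_BAD == SWAP_MAP_SHMEM:
	 *          entry owned by shmem/tmpfs
	 */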
/*
* We use this to track usage of a cluster. A cluster is a block of swap disk
@@ -233,6 +273,7 @@ struct swap_cluster_list {
* The in-memory structure used to track swap areas.
*/
struct swap_info_struct {
+ struct percpu_ref users; /* indicate and keep swap device valid. */
unsigned long flags; /* SWP_USED etc: see above */
signed short prio; /* swap priority of this type */
struct plist_node list; /* entry in swap_active_head */
@@ -247,11 +288,13 @@ struct swap_info_struct {
unsigned int inuse_pages; /* number of those currently in use */
unsigned int cluster_next; /* likely index for next allocation */
unsigned int cluster_nr; /* countdown to next cluster search */
+ unsigned int __percpu *cluster_next_cpu; /* percpu index for next allocation */
struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
struct rb_root swap_extent_root;/* root of the swap extent rbtree */
struct block_device *bdev; /* swap device or bdev of swap file */
struct file *swap_file; /* seldom referenced */
unsigned int old_block_size; /* seldom referenced */
+ struct completion comp; /* seldom referenced */
#ifdef CONFIG_FRONTSWAP
unsigned long *frontswap_map; /* frontswap in-use, one bit per page */
atomic_t frontswap_pages; /* frontswap pages in-use counter */
@@ -275,7 +318,7 @@ struct swap_info_struct {
*/
struct work_struct discard_work; /* discard worker */
struct swap_cluster_list discard_clusters; /* discard clusters list */
- struct plist_node avail_lists[0]; /*
+ struct plist_node avail_lists[]; /*
* entries in swap_avail_heads, one
* entry per node.
* Must be last as the number of the
@@ -306,64 +349,91 @@ struct vma_swap_readahead {
#endif
};
+static inline swp_entry_t folio_swap_entry(struct folio *folio)
+{
+ swp_entry_t entry = { .val = page_private(&folio->page) };
+ return entry;
+}
+
+static inline void folio_set_swap_entry(struct folio *folio, swp_entry_t entry)
+{
+ folio->private = (void *)entry.val;
+}
+
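A minimal usage sketch for the two new helpers (illustrative only; assumes
folio is an anonymous folio currently in the swap cache):

	swp_entry_t entry = folio_swap_entry(folio);	/* read folio->private */

	pr_debug("swap type %u, offset %lu\n",
		 swp_type(entry), swp_offset(entry));
	folio_set_swap_entry(folio, entry);		/* store it back */
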
/* linux/mm/workingset.c */
-void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
-void workingset_refault(struct page *page, void *shadow);
-void workingset_activation(struct page *page);
+void workingset_age_nonresident(struct lruvec *lruvec, unsigned long nr_pages);
+void *workingset_eviction(struct folio *folio, struct mem_cgroup *target_memcg);
+void workingset_refault(struct folio *folio, void *shadow);
+void workingset_activation(struct folio *folio);
/* Only track the nodes of mappings with shadow entries */
void workingset_update_node(struct xa_node *node);
+extern struct list_lru shadow_nodes;
#define mapping_set_update(xas, mapping) do { \
- if (!dax_mapping(mapping) && !shmem_mapping(mapping)) \
+ if (!dax_mapping(mapping) && !shmem_mapping(mapping)) { \
xas_set_update(xas, workingset_update_node); \
+ xas_set_lru(xas, &shadow_nodes); \
+ } \
} while (0)
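
mapping_set_update() is meant to be invoked right after initialising an
XArray walk state and before modifying i_pages, roughly like the pattern in
mm/filemap.c (a sketch, locking context and error handling omitted):

	XA_STATE(xas, &mapping->i_pages, index);

	mapping_set_update(&xas, mapping); /* track shadow nodes unless DAX/shmem */
	xas_lock_irq(&xas);
	xas_store(&xas, folio);
	xas_unlock_irq(&xas);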
/* linux/mm/page_alloc.c */
extern unsigned long totalreserve_pages;
-extern unsigned long nr_free_buffer_pages(void);
-extern unsigned long nr_free_pagecache_pages(void);
/* Definition of global_zone_page_state not available yet */
#define nr_free_pages() global_zone_page_state(NR_FREE_PAGES)
/* linux/mm/swap.c */
-extern void lru_cache_add(struct page *);
-extern void lru_cache_add_anon(struct page *page);
-extern void lru_cache_add_file(struct page *page);
-extern void lru_add_page_tail(struct page *page, struct page *page_tail,
- struct lruvec *lruvec, struct list_head *head);
-extern void activate_page(struct page *);
-extern void mark_page_accessed(struct page *);
+void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages);
+void lru_note_cost_folio(struct folio *);
+void folio_add_lru(struct folio *);
+void folio_add_lru_vma(struct folio *, struct vm_area_struct *);
+void lru_cache_add(struct page *);
+void mark_page_accessed(struct page *);
+void folio_mark_accessed(struct folio *);
+
+extern atomic_t lru_disable_count;
+
+static inline bool lru_cache_disabled(void)
+{
+ return atomic_read(&lru_disable_count);
+}
+
+static inline void lru_cache_enable(void)
+{
+ atomic_dec(&lru_disable_count);
+}
+
+extern void lru_cache_disable(void);
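
The intended pairing for the new disable/enable interface looks roughly like
this (a sketch of the caller pattern, e.g. around page migration; not part of
this patch):

	lru_cache_disable();	/* drain per-CPU batches, block further batching */
	/* ... isolate/migrate pages without racing against LRU pagevecs ... */
	lru_cache_enable();	/* decrement lru_disable_count again */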
extern void lru_add_drain(void);
extern void lru_add_drain_cpu(int cpu);
+extern void lru_add_drain_cpu_zone(struct zone *zone);
extern void lru_add_drain_all(void);
-extern void rotate_reclaimable_page(struct page *page);
-extern void deactivate_file_page(struct page *page);
extern void deactivate_page(struct page *page);
extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);
-extern void lru_cache_add_active_or_unevictable(struct page *page,
+extern void lru_cache_add_inactive_or_unevictable(struct page *page,
struct vm_area_struct *vma);
/* linux/mm/vmscan.c */
extern unsigned long zone_reclaimable_pages(struct zone *zone);
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
-extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
+
+#define MEMCG_RECLAIM_MAY_SWAP (1 << 1)
+#define MEMCG_RECLAIM_PROACTIVE (1 << 2)
extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
unsigned long nr_pages,
gfp_t gfp_mask,
- bool may_swap);
+ unsigned int reclaim_options);
extern unsigned long mem_cgroup_shrink_node(struct mem_cgroup *mem,
gfp_t gfp_mask, bool noswap,
pg_data_t *pgdat,
unsigned long *nr_scanned);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
-extern int remove_mapping(struct address_space *mapping, struct page *page);
-extern unsigned long vm_total_pages;
+long remove_mapping(struct address_space *mapping, struct folio *folio);
extern unsigned long reclaim_pages(struct list_head *page_list);
#ifdef CONFIG_NUMA
@@ -374,60 +444,33 @@ extern int sysctl_min_slab_ratio;
#define node_reclaim_mode 0
#endif
-extern int page_evictable(struct page *page);
-extern void check_move_unevictable_pages(struct pagevec *pvec);
+static inline bool node_reclaim_enabled(void)
+{
+ /* Is any node_reclaim_mode bit set? */
+ return node_reclaim_mode & (RECLAIM_ZONE|RECLAIM_WRITE|RECLAIM_UNMAP);
+}
-extern int kswapd_run(int nid);
+void check_move_unevictable_folios(struct folio_batch *fbatch);
+void check_move_unevictable_pages(struct pagevec *pvec);
+
+extern void kswapd_run(int nid);
extern void kswapd_stop(int nid);
#ifdef CONFIG_SWAP
-#include <linux/blk_types.h> /* for bio_end_io_t */
-
-/* linux/mm/page_io.c */
-extern int swap_readpage(struct page *page, bool do_poll);
-extern int swap_writepage(struct page *page, struct writeback_control *wbc);
-extern void end_swap_bio_write(struct bio *bio);
-extern int __swap_writepage(struct page *page, struct writeback_control *wbc,
- bio_end_io_t end_write_func);
-extern int swap_set_page_dirty(struct page *page);
-
int add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
unsigned long nr_pages, sector_t start_block);
int generic_swapfile_activate(struct swap_info_struct *, struct file *,
sector_t *);
-/* linux/mm/swap_state.c */
-/* One swap address space for each 64M swap space */
-#define SWAP_ADDRESS_SPACE_SHIFT 14
-#define SWAP_ADDRESS_SPACE_PAGES (1 << SWAP_ADDRESS_SPACE_SHIFT)
-extern struct address_space *swapper_spaces[];
-#define swap_address_space(entry) \
- (&swapper_spaces[swp_type(entry)][swp_offset(entry) \
- >> SWAP_ADDRESS_SPACE_SHIFT])
-extern unsigned long total_swapcache_pages(void);
-extern void show_swap_cache_info(void);
-extern int add_to_swap(struct page *page);
-extern int add_to_swap_cache(struct page *, swp_entry_t, gfp_t);
-extern int __add_to_swap_cache(struct page *page, swp_entry_t entry);
-extern void __delete_from_swap_cache(struct page *, swp_entry_t entry);
-extern void delete_from_swap_cache(struct page *);
+static inline unsigned long total_swapcache_pages(void)
+{
+ return global_node_page_state(NR_SWAPCACHE);
+}
+
+extern void free_swap_cache(struct page *page);
extern void free_page_and_swap_cache(struct page *);
extern void free_pages_and_swap_cache(struct page **, int);
-extern struct page *lookup_swap_cache(swp_entry_t entry,
- struct vm_area_struct *vma,
- unsigned long addr);
-extern struct page *read_swap_cache_async(swp_entry_t, gfp_t,
- struct vm_area_struct *vma, unsigned long addr,
- bool do_poll);
-extern struct page *__read_swap_cache_async(swp_entry_t, gfp_t,
- struct vm_area_struct *vma, unsigned long addr,
- bool *new_page_allocated);
-extern struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t flag,
- struct vm_fault *vmf);
-extern struct page *swapin_readahead(swp_entry_t entry, gfp_t flag,
- struct vm_fault *vmf);
-
/* linux/mm/swapfile.c */
extern atomic_long_t nr_swap_pages;
extern long total_swap_pages;
@@ -446,8 +489,9 @@ static inline long get_nr_swap_pages(void)
}
extern void si_swapinfo(struct sysinfo *);
-extern swp_entry_t get_swap_page(struct page *page);
-extern void put_swap_page(struct page *page, swp_entry_t entry);
+swp_entry_t folio_alloc_swap(struct folio *folio);
+bool folio_free_swap(struct folio *folio);
+void put_swap_folio(struct folio *folio, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
@@ -457,41 +501,41 @@ extern int swapcache_prepare(swp_entry_t);
extern void swap_free(swp_entry_t);
extern void swapcache_free_entries(swp_entry_t *entries, int n);
extern int free_swap_and_cache(swp_entry_t);
-extern int swap_type_of(dev_t, sector_t, struct block_device **);
+int swap_type_of(dev_t device, sector_t offset);
+int find_first_swap(dev_t *device);
extern unsigned int count_swap_pages(int, int);
-extern sector_t map_swap_page(struct page *, struct block_device **);
extern sector_t swapdev_block(int, pgoff_t);
-extern int page_swapcount(struct page *);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
extern struct swap_info_struct *page_swap_info(struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
-extern bool reuse_swap_page(struct page *, int *);
-extern int try_to_free_swap(struct page *);
struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
extern struct swap_info_struct *get_swap_device(swp_entry_t entry);
+sector_t swap_page_sector(struct page *page);
static inline void put_swap_device(struct swap_info_struct *si)
{
- rcu_read_unlock();
+ percpu_ref_put(&si->users);
}
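
With the percpu_ref conversion, a caller pins the swap device for as long as
it dereferences a swap entry; roughly (a sketch, assuming the entry may be
concurrently swapped off):

	struct swap_info_struct *si = get_swap_device(entry);

	if (!si)
		return;			/* stale or invalid entry */
	/* ... si and the swap cache for entry are safe to use here ... */
	put_swap_device(si);		/* drops the percpu_ref on si->users */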
#else /* CONFIG_SWAP */
-
-static inline int swap_readpage(struct page *page, bool do_poll)
+static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
- return 0;
+ return NULL;
}
-static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
+static inline struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
return NULL;
}
-#define swap_address_space(entry) (NULL)
+static inline void put_swap_device(struct swap_info_struct *si)
+{
+}
+
#define get_nr_swap_pages() 0L
#define total_swap_pages 0L
#define total_swapcache_pages() 0UL
@@ -506,13 +550,13 @@ static inline struct swap_info_struct *swp_swap_info(swp_entry_t entry)
#define free_pages_and_swap_cache(pages, nr) \
release_pages((pages), (nr));
-static inline void show_swap_cache_info(void)
+/* used to sanity check ptes in zap_pte_range when CONFIG_SWAP=0 */
+#define free_swap_and_cache(e) is_pfn_swap_entry(e)
+
+static inline void free_swap_cache(struct page *page)
{
}
-#define free_swap_and_cache(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
-#define swapcache_prepare(e) ({(is_migration_entry(e) || is_device_private_entry(e));})
-
static inline int add_swap_count_continuation(swp_entry_t swp, gfp_t gfp_mask)
{
return 0;
@@ -531,57 +575,8 @@ static inline void swap_free(swp_entry_t swp)
{
}
-static inline void put_swap_page(struct page *page, swp_entry_t swp)
-{
-}
-
-static inline struct page *swap_cluster_readahead(swp_entry_t entry,
- gfp_t gfp_mask, struct vm_fault *vmf)
+static inline void put_swap_folio(struct folio *folio, swp_entry_t swp)
{
- return NULL;
-}
-
-static inline struct page *swapin_readahead(swp_entry_t swp, gfp_t gfp_mask,
- struct vm_fault *vmf)
-{
- return NULL;
-}
-
-static inline int swap_writepage(struct page *p, struct writeback_control *wbc)
-{
- return 0;
-}
-
-static inline struct page *lookup_swap_cache(swp_entry_t swp,
- struct vm_area_struct *vma,
- unsigned long addr)
-{
- return NULL;
-}
-
-static inline int add_to_swap(struct page *page)
-{
- return 0;
-}
-
-static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
- gfp_t gfp_mask)
-{
- return -1;
-}
-
-static inline void __delete_from_swap_cache(struct page *page,
- swp_entry_t entry)
-{
-}
-
-static inline void delete_from_swap_cache(struct page *page)
-{
-}
-
-static inline int page_swapcount(struct page *page)
-{
- return 0;
}
static inline int __swap_count(swp_entry_t entry)
@@ -599,21 +594,24 @@ static inline int swp_swapcount(swp_entry_t entry)
return 0;
}
-#define reuse_swap_page(page, total_map_swapcount) \
- (page_trans_huge_mapcount(page, total_map_swapcount) == 1)
-
-static inline int try_to_free_swap(struct page *page)
-{
- return 0;
-}
-
-static inline swp_entry_t get_swap_page(struct page *page)
+static inline swp_entry_t folio_alloc_swap(struct folio *folio)
{
swp_entry_t entry;
entry.val = 0;
return entry;
}
+static inline bool folio_free_swap(struct folio *folio)
+{
+ return false;
+}
+
+static inline int add_swap_extent(struct swap_info_struct *sis,
+ unsigned long start_page,
+ unsigned long nr_pages, sector_t start_block)
+{
+ return -EINVAL;
+}
#endif /* CONFIG_SWAP */
#ifdef CONFIG_THP_SWAP
@@ -645,28 +643,56 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
}
#endif
+#ifdef CONFIG_ZSWAP
+extern u64 zswap_pool_total_size;
+extern atomic_t zswap_stored_pages;
+#endif
+
#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
-extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
- gfp_t gfp_mask);
+extern void __cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask);
+static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __cgroup_throttle_swaprate(page, gfp_mask);
+}
#else
-static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg,
- int node, gfp_t gfp_mask)
+static inline void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask)
{
}
#endif
+static inline void folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
+{
+ cgroup_throttle_swaprate(&folio->page, gfp);
+}
+
+#if defined(CONFIG_MEMCG) && defined(CONFIG_SWAP)
+void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry);
+int __mem_cgroup_try_charge_swap(struct folio *folio, swp_entry_t entry);
+static inline int mem_cgroup_try_charge_swap(struct folio *folio,
+ swp_entry_t entry)
+{
+ if (mem_cgroup_disabled())
+ return 0;
+ return __mem_cgroup_try_charge_swap(folio, entry);
+}
+
+extern void __mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
+static inline void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages)
+{
+ if (mem_cgroup_disabled())
+ return;
+ __mem_cgroup_uncharge_swap(entry, nr_pages);
+}
-#ifdef CONFIG_MEMCG_SWAP
-extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
-extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
-extern void mem_cgroup_uncharge_swap(swp_entry_t entry, unsigned int nr_pages);
extern long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg);
-extern bool mem_cgroup_swap_full(struct page *page);
+extern bool mem_cgroup_swap_full(struct folio *folio);
#else
-static inline void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
+static inline void mem_cgroup_swapout(struct folio *folio, swp_entry_t entry)
{
}
-static inline int mem_cgroup_try_charge_swap(struct page *page,
+static inline int mem_cgroup_try_charge_swap(struct folio *folio,
swp_entry_t entry)
{
return 0;
@@ -682,7 +708,7 @@ static inline long mem_cgroup_get_nr_swap_pages(struct mem_cgroup *memcg)
return get_nr_swap_pages();
}
-static inline bool mem_cgroup_swap_full(struct page *page)
+static inline bool mem_cgroup_swap_full(struct folio *folio)
{
return vm_swap_full();
}