From 80c545055dc7c1f7f487176fe0aac17896a4b7af Mon Sep 17 00:00:00 2001
From: Jaegeuk Kim
Date: Thu, 20 Aug 2015 08:51:56 -0700
Subject: f2fs: use __GFP_NOFAIL to avoid infinite loop

__GFP_NOFAIL lets the allocator retry internally, avoiding the
open-coded retry loops around kmem_cache_alloc and bio_alloc.
It also fixes the GFP_ATOMIC call sites by using GFP_NOFS instead.

Suggested-by: Chao Yu
Reviewed-by: Chao Yu
Signed-off-by: Jaegeuk Kim
---
 fs/f2fs/checkpoint.c | 21 ++++++++-------------
 fs/f2fs/f2fs.h       | 16 +++++-----------
 fs/f2fs/node.c       |  4 ++--
 fs/f2fs/segment.c    |  2 +-
 4 files changed, 16 insertions(+), 27 deletions(-)

diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 890e4d4c39d7..c5a38e352a80 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -336,26 +336,18 @@ const struct address_space_operations f2fs_meta_aops = {
 static void __add_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
 {
 	struct inode_management *im = &sbi->im[type];
-	struct ino_entry *e;
+	struct ino_entry *e, *tmp;
+
+	tmp = f2fs_kmem_cache_alloc(ino_entry_slab, GFP_NOFS);
 retry:
-	if (radix_tree_preload(GFP_NOFS)) {
-		cond_resched();
-		goto retry;
-	}
+	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);
 
 	spin_lock(&im->ino_lock);
-
 	e = radix_tree_lookup(&im->ino_root, ino);
 	if (!e) {
-		e = kmem_cache_alloc(ino_entry_slab, GFP_ATOMIC);
-		if (!e) {
-			spin_unlock(&im->ino_lock);
-			radix_tree_preload_end();
-			goto retry;
-		}
+		e = tmp;
 		if (radix_tree_insert(&im->ino_root, ino, e)) {
 			spin_unlock(&im->ino_lock);
-			kmem_cache_free(ino_entry_slab, e);
 			radix_tree_preload_end();
 			goto retry;
 		}
@@ -368,6 +360,9 @@ retry:
 	}
 	spin_unlock(&im->ino_lock);
 	radix_tree_preload_end();
+
+	if (e != tmp)
+		kmem_cache_free(ino_entry_slab, tmp);
 }
 
 static void __remove_ino_entry(struct f2fs_sb_info *sbi, nid_t ino, int type)
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index 66410178aba1..ece5e704dfd0 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -1252,13 +1252,10 @@ static inline void *f2fs_kmem_cache_alloc(struct kmem_cache *cachep,
 						gfp_t flags)
 {
 	void *entry;
-retry:
-	entry = kmem_cache_alloc(cachep, flags);
-	if (!entry) {
-		cond_resched();
-		goto retry;
-	}
 
+	entry = kmem_cache_alloc(cachep, flags);
+	if (!entry)
+		entry = kmem_cache_alloc(cachep, flags | __GFP_NOFAIL);
 	return entry;
 }
 
@@ -1267,12 +1264,9 @@ static inline struct bio *f2fs_bio_alloc(int npages)
 	struct bio *bio;
 
 	/* No failure on bio allocation */
-retry:
 	bio = bio_alloc(GFP_NOIO, npages);
-	if (!bio) {
-		cond_resched();
-		goto retry;
-	}
+	if (!bio)
+		bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
 	return bio;
 }
 
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 6bef5a2788b4..777066d29fa8 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -159,7 +159,7 @@ static void __set_nat_cache_dirty(struct f2fs_nm_info *nm_i,
 
 	head = radix_tree_lookup(&nm_i->nat_set_root, set);
 	if (!head) {
-		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_ATOMIC);
+		head = f2fs_kmem_cache_alloc(nat_entry_set_slab, GFP_NOFS);
 
 		INIT_LIST_HEAD(&head->entry_list);
 		INIT_LIST_HEAD(&head->set_list);
@@ -246,7 +246,7 @@ static struct nat_entry *grab_nat_entry(struct f2fs_nm_info *nm_i, nid_t nid)
 {
 	struct nat_entry *new;
 
-	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_ATOMIC);
+	new = f2fs_kmem_cache_alloc(nat_entry_slab, GFP_NOFS);
 	f2fs_radix_tree_insert(&nm_i->nat_root, nid, new);
 	memset(new, 0, sizeof(struct nat_entry));
 	nat_set_nid(new, nid);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 6273e2cde93e..78e6d0696847 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -1753,7 +1753,7 @@ static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
 static struct sit_entry_set *grab_sit_entry_set(void)
 {
 	struct sit_entry_set *ses =
-			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_ATOMIC);
+			f2fs_kmem_cache_alloc(sit_entry_set_slab, GFP_NOFS);
 
 	ses->entry_cnt = 0;
 	INIT_LIST_HEAD(&ses->set_list);
-- 
cgit v1.2.3-59-g8ed1b
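
The common idiom across the f2fs.h helpers is to try the caller's GFP
mask first and, only on failure, repeat the allocation with
__GFP_NOFAIL, so that the page allocator performs the retry (and can
enter reclaim) instead of an open-coded cond_resched() loop that may
never make forward progress.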
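
The __add_ino_entry() hunk combines two further ideas: the entry is
preallocated with GFP_NOFS before the spinlock is taken, which is what
removes the need for GFP_ATOMIC, and the preallocation is freed
afterwards if the lookup found an existing entry. A minimal
self-contained sketch of that pattern follows; the names (my_entry,
my_slab, my_root, my_lock, add_entry) are illustrative, not f2fs
symbols:

/* Illustrative sketch of the preallocate-outside-the-lock pattern in
 * __add_ino_entry(); my_entry, my_slab, my_root and my_lock are
 * made-up names, not from the patch.
 */
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_entry {
	unsigned long ino;
};

static struct kmem_cache *my_slab;	/* set up via kmem_cache_create() */
static RADIX_TREE(my_root, GFP_NOIO);
static DEFINE_SPINLOCK(my_lock);

static void add_entry(unsigned long ino)
{
	struct my_entry *e, *tmp;

	/* May sleep but never returns NULL, so no GFP_ATOMIC is needed
	 * later under the spinlock. */
	tmp = kmem_cache_alloc(my_slab, GFP_NOFS | __GFP_NOFAIL);

	/* Preload node memory so radix_tree_insert() below cannot fail
	 * with -ENOMEM while the lock is held. */
	radix_tree_preload(GFP_NOFS | __GFP_NOFAIL);

	spin_lock(&my_lock);
	e = radix_tree_lookup(&my_root, ino);
	if (!e) {
		e = tmp;
		e->ino = ino;
		/* Preloaded, and the lookup above showed the slot is
		 * empty, so this succeeds. */
		radix_tree_insert(&my_root, ino, e);
	}
	spin_unlock(&my_lock);
	radix_tree_preload_end();

	/* The entry already existed; drop the unused preallocation. */
	if (e != tmp)
		kmem_cache_free(my_slab, tmp);
}

This mirrors what the patch does, except that f2fs keeps the retry
label to back out cleanly if radix_tree_insert() still returns an
error.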