From 94ae8ba7176666d1e7d8bbb9f93670a27540b6a8 Mon Sep 17 00:00:00 2001
From: "Aneesh Kumar K.V"
Date: Tue, 31 Jul 2012 16:42:35 -0700
Subject: hugetlb/cgroup: assign the page hugetlb cgroup when we move the page
 to active list.

A page's hugetlb cgroup assignment and movement to the active list should
occur with hugetlb_lock held.  Otherwise when we remove the hugetlb cgroup
we will iterate the active list and find pages with NULL hugetlb cgroup
values.

Signed-off-by: Aneesh Kumar K.V
Reviewed-by: Michal Hocko
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 mm/hugetlb.c        | 22 ++++++++++------------
 mm/hugetlb_cgroup.c |  5 +++--
 2 files changed, 13 insertions(+), 14 deletions(-)

diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ec7b86ebf9d9..c39e4beeb63a 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -928,14 +928,8 @@ struct page *alloc_huge_page_node(struct hstate *h, int nid)
 	page = dequeue_huge_page_node(h, nid);
 	spin_unlock(&hugetlb_lock);
 
-	if (!page) {
+	if (!page)
 		page = alloc_buddy_huge_page(h, nid);
-		if (page) {
-			spin_lock(&hugetlb_lock);
-			list_move(&page->lru, &h->hugepage_activelist);
-			spin_unlock(&hugetlb_lock);
-		}
-	}
 
 	return page;
 }
@@ -1150,9 +1144,13 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	}
 	spin_lock(&hugetlb_lock);
 	page = dequeue_huge_page_vma(h, vma, addr, avoid_reserve);
-	spin_unlock(&hugetlb_lock);
-
-	if (!page) {
+	if (page) {
+		/* update page cgroup details */
+		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+					     h_cg, page);
+		spin_unlock(&hugetlb_lock);
+	} else {
+		spin_unlock(&hugetlb_lock);
 		page = alloc_buddy_huge_page(h, NUMA_NO_NODE);
 		if (!page) {
 			hugetlb_cgroup_uncharge_cgroup(idx,
@@ -1162,6 +1160,8 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 			return ERR_PTR(-ENOSPC);
 		}
 		spin_lock(&hugetlb_lock);
+		hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h),
+					     h_cg, page);
 		list_move(&page->lru, &h->hugepage_activelist);
 		spin_unlock(&hugetlb_lock);
 	}
@@ -1169,8 +1169,6 @@ static struct page *alloc_huge_page(struct vm_area_struct *vma,
 	set_page_private(page, (unsigned long)spool);
 
 	vma_commit_reservation(h, vma, addr);
-	/* update page cgroup details */
-	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, page);
 	return page;
 }
 
diff --git a/mm/hugetlb_cgroup.c b/mm/hugetlb_cgroup.c
index 680e4819e077..9834a01c79dc 100644
--- a/mm/hugetlb_cgroup.c
+++ b/mm/hugetlb_cgroup.c
@@ -213,6 +213,7 @@ done:
 	return ret;
 }
 
+/* Should be called with hugetlb_lock held */
 void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 				  struct hugetlb_cgroup *h_cg,
 				  struct page *page)
@@ -220,9 +221,7 @@ void hugetlb_cgroup_commit_charge(int idx, unsigned long nr_pages,
 	if (hugetlb_cgroup_disabled() || !h_cg)
 		return;
 
-	spin_lock(&hugetlb_lock);
 	set_hugetlb_cgroup(page, h_cg);
-	spin_unlock(&hugetlb_lock);
 	return;
 }
 
@@ -389,6 +388,7 @@ int __init hugetlb_cgroup_file_init(int idx)
 void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 {
 	struct hugetlb_cgroup *h_cg;
+	struct hstate *h = page_hstate(oldhpage);
 
 	if (hugetlb_cgroup_disabled())
 		return;
@@ -401,6 +401,7 @@ void hugetlb_cgroup_migrate(struct page *oldhpage, struct page *newhpage)
 
 	/* move the h_cg details to new cgroup */
 	set_hugetlb_cgroup(newhpage, h_cg);
+	list_move(&newhpage->lru, &h->hugepage_activelist);
 	spin_unlock(&hugetlb_lock);
 	cgroup_release_and_wakeup_rmdir(&h_cg->css);
 	return;
-- 
cgit v1.2.3-59-g8ed1b
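
For readers who want to see the invariant outside the kernel sources, below is
a minimal user-space C sketch of the rule this patch enforces.  It is not
kernel code: fake_page, activelist, commit_and_activate() and the pthread lock
are invented stand-ins for struct page, hugepage_activelist, the cgroup commit
path and hugetlb_lock.  The point it illustrates is that the hugetlb cgroup
pointer is assigned and the page is placed on the active list under the same
lock, so a removal path that walks the active list under that lock can never
observe a NULL cgroup.

/*
 * User-space sketch only; names are hypothetical stand-ins for the
 * kernel objects touched by this patch.
 */
#include <assert.h>
#include <pthread.h>
#include <stddef.h>

struct fake_page {
	struct fake_page *next;		/* stand-in for page->lru linkage   */
	void *hugetlb_cgroup;		/* stand-in for the cgroup pointer  */
};

static pthread_mutex_t fake_hugetlb_lock = PTHREAD_MUTEX_INITIALIZER;
static struct fake_page *activelist;	/* stand-in for hugepage_activelist */

/* Assign the cgroup and publish the page on the list under one lock hold. */
static void commit_and_activate(struct fake_page *page, void *h_cg)
{
	pthread_mutex_lock(&fake_hugetlb_lock);
	page->hugetlb_cgroup = h_cg;	/* set_hugetlb_cgroup()             */
	page->next = activelist;	/* list_move() onto the active list */
	activelist = page;
	pthread_mutex_unlock(&fake_hugetlb_lock);
}

/* The "cgroup removal" side: every page it can see must have a cgroup. */
static void walk_activelist(void)
{
	struct fake_page *p;

	pthread_mutex_lock(&fake_hugetlb_lock);
	for (p = activelist; p; p = p->next)
		assert(p->hugetlb_cgroup != NULL);
	pthread_mutex_unlock(&fake_hugetlb_lock);
}

int main(void)
{
	static struct fake_page page;
	static int dummy_cgroup;

	commit_and_activate(&page, &dummy_cgroup);
	walk_activelist();
	return 0;
}

Before the patch, the list insertion and the cgroup assignment happened under
two separate acquisitions of the lock, which is the window the sketch's single
locked section is meant to close.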