Diffstat (limited to 'mm')
-rw-r--r--  mm/bootmem.c      24
-rw-r--r--  mm/compaction.c    6
-rw-r--r--  mm/huge_memory.c  15
-rw-r--r--  mm/memblock.c      3
-rw-r--r--  mm/migrate.c      14
-rw-r--r--  mm/mmap.c          2
-rw-r--r--  mm/page_alloc.c    2
7 files changed, 50 insertions, 16 deletions
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 1324cd74faec..b93376c39b61 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -185,10 +185,23 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
while (start < end) {
unsigned long *map, idx, vec;
+ unsigned shift;
map = bdata->node_bootmem_map;
idx = start - bdata->node_min_pfn;
+ shift = idx & (BITS_PER_LONG - 1);
+ /*
+ * vec holds at most BITS_PER_LONG map bits,
+ * bit 0 corresponds to start.
+ */
vec = ~map[idx / BITS_PER_LONG];
+
+ if (shift) {
+ vec >>= shift;
+ if (end - start >= BITS_PER_LONG)
+ vec |= ~map[idx / BITS_PER_LONG + 1] <<
+ (BITS_PER_LONG - shift);
+ }
/*
* If we have a properly aligned and fully unreserved
* BITS_PER_LONG block of pages in front of us, free
@@ -201,19 +214,18 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
count += BITS_PER_LONG;
start += BITS_PER_LONG;
} else {
- unsigned long off = 0;
+ unsigned long cur = start;
- vec >>= start & (BITS_PER_LONG - 1);
- while (vec) {
+ start = ALIGN(start + 1, BITS_PER_LONG);
+ while (vec && cur != start) {
if (vec & 1) {
- page = pfn_to_page(start + off);
+ page = pfn_to_page(cur);
__free_pages_bootmem(page, 0);
count++;
}
vec >>= 1;
- off++;
+ ++cur;
}
- start = ALIGN(start + 1, BITS_PER_LONG);
}
}
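The bootmem hunk above handles the case where start is not aligned to BITS_PER_LONG: the free-page mask has to be stitched together from two bitmap words. Below is a minimal userspace sketch of that word-stitching, assuming a toy reserved-page bitmap in plain C; free_mask(), map[] and the "set bit means reserved" convention are illustrative stand-ins, not the kernel's bootmem structures.

/* Sketch only: models the vec assembly from free_all_bootmem_core()
 * with plain userspace C. */
#include <stdio.h>
#include <limits.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Build the "free pages" mask starting at bit idx of a reserved-page bitmap. */
static unsigned long free_mask(const unsigned long *map, unsigned long idx,
                               unsigned long remaining)
{
	unsigned long shift = idx & (BITS_PER_LONG - 1);
	unsigned long vec = ~map[idx / BITS_PER_LONG];	/* 1 bit = free page */

	if (shift) {
		vec >>= shift;			/* bit 0 now corresponds to idx */
		if (remaining >= BITS_PER_LONG)	/* top up from the next word */
			vec |= ~map[idx / BITS_PER_LONG + 1] <<
				(BITS_PER_LONG - shift);
	}
	return vec;
}

int main(void)
{
	/* Every fourth page reserved, purely for illustration. */
	unsigned long map[2] = { 0x1111111111111111UL, 0x1111111111111111UL };
	unsigned long vec = free_mask(map, 6, 200);

	printf("free mask for pages from bitmap index 6: %#lx\n", vec);
	printf("page at index 6 free? %s\n", (vec & 1) ? "yes" : "no");
	return 0;
}

Bit 0 of the assembled mask always corresponds to the starting pfn, which is what lets the later loop in the hunk walk pages with a simple vec >>= 1.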
diff --git a/mm/compaction.c b/mm/compaction.c
index 2c570432aa56..c62bd063d766 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1144,7 +1144,7 @@ static int compact_node(int nid)
}
/* Compact all nodes in the system */
-static int compact_nodes(void)
+static void compact_nodes(void)
{
int nid;
@@ -1153,8 +1153,6 @@ static int compact_nodes(void)
for_each_online_node(nid)
compact_node(nid);
-
- return COMPACT_COMPLETE;
}
/* The written value is actually unused, all memory is compacted */
@@ -1165,7 +1163,7 @@ int sysctl_compaction_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length, loff_t *ppos)
{
if (write)
- return compact_nodes();
+ compact_nodes();
return 0;
}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 9e894edc7811..6001ee6347a9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1819,9 +1819,19 @@ int split_huge_page(struct page *page)
BUG_ON(is_huge_zero_pfn(page_to_pfn(page)));
BUG_ON(!PageAnon(page));
- anon_vma = page_lock_anon_vma_read(page);
+
+ /*
+ * The caller does not necessarily hold an mmap_sem that would prevent
+ * the anon_vma disappearing, so we first take a reference to it
+ * and then lock the anon_vma for write. This is similar to
+ * page_lock_anon_vma_read except the write lock is taken to serialise
+ * against parallel split or collapse operations.
+ */
+ anon_vma = page_get_anon_vma(page);
if (!anon_vma)
goto out;
+ anon_vma_lock_write(anon_vma);
+
ret = 0;
if (!PageCompound(page))
goto out_unlock;
@@ -1832,7 +1842,8 @@ int split_huge_page(struct page *page)
BUG_ON(PageCompound(page));
out_unlock:
- page_unlock_anon_vma_read(anon_vma);
+ anon_vma_unlock(anon_vma);
+ put_anon_vma(anon_vma);
out:
return ret;
}
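The new comment in split_huge_page() describes a general pattern: pin the object with a reference so it cannot be freed, then take its lock for write to exclude concurrent modifiers. Below is a hedged userspace analogue using pthreads and C11 atomics; struct obj, obj_get(), obj_put() and split_object() are illustrative stand-ins, not the kernel's anon_vma API (the diff itself uses page_get_anon_vma() followed by anon_vma_lock_write()).

/* Userspace analogue of "take a reference, then lock for write". */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	atomic_int refcount;		/* keeps the object alive */
	pthread_rwlock_t lock;		/* serialises writers */
	int state;
};

/* Simplified stand-in for an "increment unless already zero" primitive. */
static struct obj *obj_get(struct obj *o)
{
	if (atomic_fetch_add(&o->refcount, 1) == 0) {
		atomic_fetch_sub(&o->refcount, 1);
		return NULL;		/* object already being torn down */
	}
	return o;
}

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcount, 1) == 1) {
		pthread_rwlock_destroy(&o->lock);
		free(o);
	}
}

static int split_object(struct obj *o)
{
	if (!obj_get(o))		/* 1. reference: object cannot vanish */
		return -1;
	pthread_rwlock_wrlock(&o->lock);/* 2. write lock: no concurrent split */
	o->state++;			/* the "split" work */
	pthread_rwlock_unlock(&o->lock);
	obj_put(o);
	return 0;
}

int main(void)
{
	struct obj *o = calloc(1, sizeof(*o));

	atomic_init(&o->refcount, 1);	/* creator's reference */
	pthread_rwlock_init(&o->lock, NULL);

	split_object(o);
	printf("state after split: %d\n", o->state);
	obj_put(o);			/* last reference frees the object */
	return 0;
}

The ordering matters: locking before holding a reference would leave a window in which the object could be freed underneath the locker, which is exactly what the added reference in the hunk guards against.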
diff --git a/mm/memblock.c b/mm/memblock.c
index 625905523c2a..88adc8afb610 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -314,7 +314,8 @@ static void __init_memblock memblock_merge_regions(struct memblock_type *type)
}
this->size += next->size;
- memmove(next, next + 1, (type->cnt - (i + 1)) * sizeof(*next));
+ /* move forward from next + 1, index of which is i + 2 */
+ memmove(next, next + 1, (type->cnt - (i + 2)) * sizeof(*next));
type->cnt--;
}
}
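To see where the corrected memmove() count comes from: after folding region i + 1 into region i, the entries that still have to slide down start at index i + 2, so there are cnt - (i + 2) of them. A small standalone sketch, using a toy struct region rather than struct memblock_region and an illustrative helper merge_into_prev():

/* Sketch: compacting an array after merging element i+1 into element i,
 * mirroring the memmove() count fixed in memblock_merge_regions(). */
#include <stdio.h>
#include <string.h>

struct region { unsigned long base, size; };

static void merge_into_prev(struct region *r, int *cnt, int i)
{
	struct region *this = &r[i];
	struct region *next = &r[i + 1];

	this->size += next->size;
	/* move forward from next + 1, index of which is i + 2 */
	memmove(next, next + 1, (*cnt - (i + 2)) * sizeof(*next));
	(*cnt)--;
}

int main(void)
{
	struct region r[4] = {
		{ 0x0000, 0x1000 }, { 0x1000, 0x1000 },
		{ 0x5000, 0x1000 }, { 0x9000, 0x1000 },
	};
	int cnt = 4;

	merge_into_prev(r, &cnt, 0);	/* r[1] is adjacent to r[0] */

	for (int i = 0; i < cnt; i++)
		printf("region %d: base %#lx size %#lx\n",
		       i, r[i].base, r[i].size);
	return 0;
}

With the previous count, cnt - (i + 1), one extra element just past the last valid region would also be copied; harmless in the kernel's oversized regions array, but unnecessary work.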
diff --git a/mm/migrate.c b/mm/migrate.c
index 3b676b0c5c3e..c38778610aa8 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1679,9 +1679,21 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
page_xchg_last_nid(new_page, page_last_nid(page));
isolated = numamigrate_isolate_page(pgdat, page);
- if (!isolated) {
+
+ /*
+ * Failing to isolate or a GUP pin prevents migration. The expected
+ * page count is 2: 1 for anonymous pages without a mapping and 1
+ * for the caller's pin. If the page was isolated, the page will
+ * need to be put back on the LRU.
+ */
+ if (!isolated || page_count(page) != 2) {
count_vm_events(PGMIGRATE_FAIL, HPAGE_PMD_NR);
put_page(new_page);
+ if (isolated) {
+ putback_lru_page(page);
+ isolated = 0;
+ goto out;
+ }
goto out_keep_locked;
}
diff --git a/mm/mmap.c b/mm/mmap.c
index f54b235f29a9..35730ee9d515 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2886,7 +2886,7 @@ static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
* The LSB of head.next can't change from under us
* because we hold the mm_all_locks_mutex.
*/
- down_write(&anon_vma->root->rwsem);
+ down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
/*
* We can safely modify head.next after taking the
* anon_vma->root->rwsem. If some other vma in this mm shares
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ece7b8e8a5b6..df2022ff0c8a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -5585,7 +5585,7 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
pfn &= (PAGES_PER_SECTION-1);
return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#else
- pfn = pfn - zone->zone_start_pfn;
+ pfn = pfn - round_down(zone->zone_start_pfn, pageblock_nr_pages);
return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
#endif /* CONFIG_SPARSEMEM */
}
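The page_alloc.c change matters only when zone->zone_start_pfn is not pageblock-aligned: the pageblock flags bitmap is sized and indexed from the rounded-down zone start, so the pfn must be offset from that same rounded-down base. A hedged userspace sketch of the arithmetic follows; round_down(), pageblock_order = 9 and NR_PAGEBLOCK_BITS = 4 are re-declared here purely for illustration, and only the !CONFIG_SPARSEMEM branch is modelled.

/* Sketch of the pfn_to_bitidx() arithmetic; constants are illustrative. */
#include <stdio.h>

#define pageblock_order		9
#define pageblock_nr_pages	(1UL << pageblock_order)
#define NR_PAGEBLOCK_BITS	4

/* Same effect as the kernel's round_down() for power-of-two alignments. */
#define round_down(x, y)	((x) & ~((y) - 1))

static unsigned long pfn_to_bitidx(unsigned long zone_start_pfn,
				   unsigned long pfn)
{
	pfn = pfn - round_down(zone_start_pfn, pageblock_nr_pages);
	return (pfn >> pageblock_order) * NR_PAGEBLOCK_BITS;
}

int main(void)
{
	/* A zone that starts mid-pageblock, e.g. pfn 0x1234. */
	unsigned long zone_start_pfn = 0x1234;

	/* Two pfns in different pageblocks map to different bit indices. */
	printf("bitidx(0x1234) = %lu\n", pfn_to_bitidx(zone_start_pfn, 0x1234));
	printf("bitidx(0x1400) = %lu\n", pfn_to_bitidx(zone_start_pfn, 0x1400));
	return 0;
}

Without the round_down(), a zone starting mid-pageblock would map pfns from two different pageblocks (0x1234 and 0x1400 above) onto the same group of pageblock flag bits.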