Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c     20
-rw-r--r--  mm/hugetlb.c      2
-rw-r--r--  mm/mmap.c        72
-rw-r--r--  mm/page_alloc.c  10
4 files changed, 79 insertions(+), 25 deletions(-)
diff --git a/mm/filemap.c b/mm/filemap.c
index 8332c77b1bd1..f30ef28405d3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -606,26 +606,6 @@ struct page * find_get_page(struct address_space *mapping, unsigned long offset)
EXPORT_SYMBOL(find_get_page);
/**
- * find_trylock_page - find and lock a page
- * @mapping: the address_space to search
- * @offset: the page index
- *
- * Same as find_get_page(), but trylock it instead of incrementing the count.
- */
-struct page *find_trylock_page(struct address_space *mapping, unsigned long offset)
-{
- struct page *page;
-
- read_lock_irq(&mapping->tree_lock);
- page = radix_tree_lookup(&mapping->page_tree, offset);
- if (page && TestSetPageLocked(page))
- page = NULL;
- read_unlock_irq(&mapping->tree_lock);
- return page;
-}
-EXPORT_SYMBOL(find_trylock_page);
-
-/**
* find_lock_page - locate, pin and lock a pagecache page
* @mapping: the address_space to search
* @offset: the page index
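
Note what the removed helper promised: it returned the page locked but with
no reference taken; the lookup and trylock happened entirely under
tree_lock. A caller that still wants trylock semantics can build a safer
equivalent from the surviving primitives. A minimal sketch using only the
era-appropriate API visible here (find_get_page(), TestSetPageLocked(),
plus page_cache_release() to drop the reference); illustrative, not a
drop-in from the tree:

	struct page *page = find_get_page(mapping, offset); /* takes a ref */

	if (page && TestSetPageLocked(page)) {
		/* somebody else holds the lock: drop our reference */
		page_cache_release(page);
		page = NULL;
	}
	/* on success, page is both locked and referenced */

Unlike the removed find_trylock_page(), a page obtained this way is pinned
by its reference count, so the caller may safely sleep while holding it.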
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index cb362f761f17..36db012b38dd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -389,6 +389,8 @@ void __unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
continue;
page = pte_page(pte);
+ if (pte_dirty(pte))
+ set_page_dirty(page);
list_add(&page->lru, &page_list);
}
spin_unlock(&mm->page_table_lock);
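
The two lines added here close a data-loss window: the pte's hardware dirty
bit was being discarded when a huge page was unmapped, so a write through
this mapping could vanish without the page ever being marked dirty. The
pattern, spelled out with the same identifiers as the hunk (comments are
editorial):

	/* the pte is about to be torn down along with the mapping */
	if (pte_dirty(pte))		/* the CPU set the dirty bit on a write */
		set_page_dirty(page);	/* record it on the struct page, where
					 * writeback can still see it */
	list_add(&page->lru, &page_list);	/* only then queue the page */

This mirrors what the regular-page teardown path already does for normal
pages; hugetlb had simply been skipping the step.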
diff --git a/mm/mmap.c b/mm/mmap.c
index cc3a20819457..eb509ae76553 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2101,3 +2101,75 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
return 0;
return 1;
}
+
+
+static struct page *special_mapping_nopage(struct vm_area_struct *vma,
+ unsigned long address, int *type)
+{
+ struct page **pages;
+
+ BUG_ON(address < vma->vm_start || address >= vma->vm_end);
+
+ address -= vma->vm_start;
+ for (pages = vma->vm_private_data; address > 0 && *pages; ++pages)
+ address -= PAGE_SIZE;
+
+ if (*pages) {
+ struct page *page = *pages;
+ get_page(page);
+ return page;
+ }
+
+ return NOPAGE_SIGBUS;
+}
+
+/*
+ * Having a close hook prevents vma merging regardless of flags.
+ */
+static void special_mapping_close(struct vm_area_struct *vma)
+{
+}
+
+static struct vm_operations_struct special_mapping_vmops = {
+ .close = special_mapping_close,
+ .nopage = special_mapping_nopage,
+};
+
+/*
+ * Called with mm->mmap_sem held for writing.
+ * Insert a new vma covering the given region, with the given flags.
+ * Its pages are supplied by the given array of struct page *.
+ * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
+ * The region past the last page supplied will always produce SIGBUS.
+ * The array pointer and the pages it points to are assumed to stay alive
+ * for as long as this mapping might exist.
+ */
+int install_special_mapping(struct mm_struct *mm,
+ unsigned long addr, unsigned long len,
+ unsigned long vm_flags, struct page **pages)
+{
+ struct vm_area_struct *vma;
+
+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
+ if (unlikely(vma == NULL))
+ return -ENOMEM;
+
+ vma->vm_mm = mm;
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+
+ vma->vm_flags = vm_flags | mm->def_flags;
+ vma->vm_page_prot = protection_map[vma->vm_flags & 7];
+
+ vma->vm_ops = &special_mapping_vmops;
+ vma->vm_private_data = pages;
+
+ if (unlikely(insert_vm_struct(mm, vma))) {
+ kmem_cache_free(vm_area_cachep, vma);
+ return -ENOMEM;
+ }
+
+ mm->total_vm += len >> PAGE_SHIFT;
+
+ return 0;
+}
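
The block comment above fixes the contract: mmap_sem held for writing, a
null-terminated array of struct page * that must outlive the mapping, and
SIGBUS for faults beyond the last supplied page. A hypothetical caller
might look like the following; the names (my_special_pages,
map_my_special_page) and the flag choice are invented for illustration:

	static struct page *my_special_pages[2];  /* [1] stays NULL: terminator */

	static int map_my_special_page(struct mm_struct *mm, unsigned long addr)
	{
		void *kva = (void *)get_zeroed_page(GFP_KERNEL);

		if (!kva)
			return -ENOMEM;
		/* static array + never-freed page satisfy the lifetime rule */
		my_special_pages[0] = virt_to_page(kva);

		/* caller arranges down_write(&mm->mmap_sem) around this */
		return install_special_mapping(mm, addr, PAGE_SIZE,
				VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
				my_special_pages);
	}

A fault at addr then resolves to my_special_pages[0] via
special_mapping_nopage(); a fault in the same vma past the supplied pages
gets SIGBUS. This is the shape of use (a per-process vDSO-style page) the
helper appears designed for.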
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fc5b5442e942..f12052dc23ff 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -989,8 +989,7 @@ int zone_watermark_ok(struct zone *z, int order, unsigned long mark,
int classzone_idx, int alloc_flags)
{
/* free_pages may go negative - that's OK */
- unsigned long min = mark;
- long free_pages = z->free_pages - (1 << order) + 1;
+ long min = mark, free_pages = z->free_pages - (1 << order) + 1;
int o;
if (alloc_flags & ALLOC_HIGH)
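
The retyping of min is not cosmetic. free_pages is deliberately allowed to
go negative (see the comment), and later in this function it is compared
against min. Had min stayed unsigned long, C's usual arithmetic conversions
would promote the signed free_pages to unsigned in that comparison, so a
negative value would look enormous and the watermark check would succeed
exactly when the zone is most depleted. A standalone demonstration of the
trap (userspace, illustrative only):

	#include <stdio.h>

	int main(void)
	{
		long free_pages = -5;		/* "free_pages may go negative" */
		unsigned long min_u = 100;	/* the old type of 'min' */
		long min_s = 100;		/* the new type of 'min' */

		/* mixed signed/unsigned: free_pages converts to unsigned long,
		 * -5 becomes ULONG_MAX - 4, and the zone looks healthy */
		if (free_pages >= min_u)
			printf("unsigned min: watermark ok (wrong)\n");

		/* all-signed comparison behaves as intended */
		if (free_pages < min_s)
			printf("signed min: below watermark (right)\n");
		return 0;
	}

Declaring both variables long keeps every comparison in signed arithmetic.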
@@ -1580,8 +1579,8 @@ void show_free_areas(void)
get_zone_counts(&active, &inactive, &free);
- printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu "
- "unstable:%lu free:%u slab:%lu mapped:%lu pagetables:%lu\n",
+ printk("Active:%lu inactive:%lu dirty:%lu writeback:%lu unstable:%lu\n"
+ " free:%u slab:%lu mapped:%lu pagetables:%lu bounce:%lu\n",
active,
inactive,
global_page_state(NR_FILE_DIRTY),
@@ -1591,7 +1590,8 @@ void show_free_areas(void)
global_page_state(NR_SLAB_RECLAIMABLE) +
global_page_state(NR_SLAB_UNRECLAIMABLE),
global_page_state(NR_FILE_MAPPED),
- global_page_state(NR_PAGETABLE));
+ global_page_state(NR_PAGETABLE),
+ global_page_state(NR_BOUNCE));
for_each_zone(zone) {
int i;