Diffstat (limited to 'mm')
-rw-r--r--  mm/filemap.c         30
-rw-r--r--  mm/nommu.c            1
-rw-r--r--  mm/shmem.c           15
-rw-r--r--  mm/slub.c             2
-rw-r--r--  mm/sparse-vmemmap.c   1
-rw-r--r--  mm/sparse.c          11
6 files changed, 27 insertions, 33 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 5209e47b7fe3..188cf5fd3e8d 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -28,6 +28,7 @@
#include <linux/backing-dev.h>
#include <linux/pagevec.h>
#include <linux/blkdev.h>
+#include <linux/backing-dev.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/cpuset.h>
@@ -1299,7 +1300,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
if (vmf->pgoff >= size)
- goto outside_data_content;
+ return VM_FAULT_SIGBUS;
/* If we don't want any read-ahead, don't bother */
if (VM_RandomReadHint(vma))
@@ -1376,7 +1377,7 @@ retry_find:
if (unlikely(vmf->pgoff >= size)) {
unlock_page(page);
page_cache_release(page);
- goto outside_data_content;
+ return VM_FAULT_SIGBUS;
}
/*
@@ -1387,15 +1388,6 @@ retry_find:
vmf->page = page;
return ret | VM_FAULT_LOCKED;
-outside_data_content:
- /*
- * An external ptracer can access pages that normally aren't
- * accessible..
- */
- if (vma->vm_mm == current->mm)
- return VM_FAULT_SIGBUS;
-
- /* Fall through to the non-read-ahead case */
no_cached_page:
/*
* We're only likely to ever get here if MADV_RANDOM is in
@@ -2510,21 +2502,17 @@ generic_file_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
}
retval = mapping->a_ops->direct_IO(rw, iocb, iov, offset, nr_segs);
- if (retval)
- goto out;
/*
* Finally, try again to invalidate clean pages which might have been
- * faulted in by get_user_pages() if the source of the write was an
- * mmap()ed region of the file we're writing. That's a pretty crazy
- * thing to do, so we don't support it 100%. If this invalidation
- * fails and we have -EIOCBQUEUED we ignore the failure.
+ * cached by non-direct readahead, or faulted in by get_user_pages()
+ * if the source of the write was an mmap'ed region of the file
+ * we're writing. Either one is a pretty crazy thing to do,
+ * so we don't support it 100%. If this invalidation
+ * fails, tough, the write still worked...
*/
if (rw == WRITE && mapping->nrpages) {
- int err = invalidate_inode_pages2_range(mapping,
- offset >> PAGE_CACHE_SHIFT, end);
- if (err && retval >= 0)
- retval = err;
+ invalidate_inode_pages2_range(mapping, offset >> PAGE_CACHE_SHIFT, end);
}
out:
return retval;
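
The filemap_fault() change above is worth restating: with the ptracer special case gone, an access beyond end-of-file fails unconditionally, so both call sites can return VM_FAULT_SIGBUS directly instead of jumping to a shared label. A rough sketch of the resulting control flow, assuming the 2.6.23-era ->fault() API (hypothetical example_fault(), not code from this patch):

/*
 * Illustrative sketch only.  Assumes the 2.6.23-era ->fault() API:
 * the faulting offset arrives in vmf->pgoff and VM_FAULT_* codes are
 * returned straight to the caller.
 */
static int example_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct inode *inode = vma->vm_file->f_mapping->host;
	unsigned long size;

	size = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (vmf->pgoff >= size)
		return VM_FAULT_SIGBUS;	/* beyond EOF: fail, no special cases */

	/* ... find or read in the page, lock it, set vmf->page ... */
	return VM_FAULT_LOCKED;
}
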
diff --git a/mm/nommu.c b/mm/nommu.c
index 8f09333f78e1..35622c590925 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -12,6 +12,7 @@
* Copyright (c) 2002 Greg Ungerer <gerg@snapgear.com>
*/
+#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/swap.h>
diff --git a/mm/shmem.c b/mm/shmem.c
index 404e53bb2127..253d205914ba 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -915,6 +915,21 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
struct inode *inode;
BUG_ON(!PageLocked(page));
+ /*
+ * shmem_backing_dev_info's capabilities prevent regular writeback or
+ * sync from ever calling shmem_writepage; but a stacking filesystem
+ * may use the ->writepage of its underlying filesystem, in which case
+ * we want to do nothing when that underlying filesystem is tmpfs
+ * (writing out to swap is useful as a response to memory pressure, but
+ * of no use to stabilize the data) - just redirty the page, unlock it
+ * and claim success in this case. AOP_WRITEPAGE_ACTIVATE, and the
+ * page_mapped check below, must be avoided unless we're in reclaim.
+ */
+ if (!wbc->for_reclaim) {
+ set_page_dirty(page);
+ unlock_page(page);
+ return 0;
+ }
BUG_ON(page_mapped(page));
mapping = page->mapping;
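
The block added to shmem_writepage() is an instance of a general pattern for filesystems whose ->writepage only makes sense under memory pressure: when the caller is not reclaim (wbc->for_reclaim is clear), redirty the page, unlock it, and report success so writeback still makes progress. A minimal sketch of that pattern against the same era's writepage API (hypothetical example_writepage(), not part of this patch):

static int example_writepage(struct page *page, struct writeback_control *wbc)
{
	BUG_ON(!PageLocked(page));

	if (!wbc->for_reclaim) {
		/*
		 * Not called for memory pressure (e.g. a stacking
		 * filesystem writing through us): keep the data dirty
		 * in the page cache and claim success.
		 */
		set_page_dirty(page);
		unlock_page(page);
		return 0;
	}

	/* ... real writeout (to swap, in tmpfs's case) goes here ... */
	unlock_page(page);
	return 0;
}
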
diff --git a/mm/slub.c b/mm/slub.c
index aac1dd3c657d..bcdb2c8941a5 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2734,7 +2734,7 @@ static void slab_mem_offline_callback(void *arg)
* and offline_pages() function shouldn't call this
* callback. So, we must fail.
*/
- BUG_ON(atomic_read(&n->nr_slabs));
+ BUG_ON(atomic_long_read(&n->nr_slabs));
s->node[offline_node] = NULL;
kmem_cache_free(kmalloc_caches, n);
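
The one-line slub.c fix is a type-correctness repair: nr_slabs is an atomic_long_t, so it must be read with atomic_long_read(); atomic_read() takes an atomic_t, which at best draws a compiler warning and at worst reads only half of a 64-bit counter. A standalone illustration of keeping counter type and accessor paired (hypothetical names, not from this patch):

struct example_node {
	atomic_long_t nr_slabs;		/* long-sized atomic counter */
};

static void example_offline_check(struct example_node *n)
{
	/*
	 * atomic_long_t pairs with atomic_long_read()/atomic_long_inc();
	 * passing it to atomic_read() mismatches the type.
	 */
	BUG_ON(atomic_long_read(&n->nr_slabs));
}
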
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index d3b718b0c20a..22620f6a976b 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -24,6 +24,7 @@
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
+#include <linux/sched.h>
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
diff --git a/mm/sparse.c b/mm/sparse.c
index 08fb14f5eea3..e06f514fe04f 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -220,12 +220,6 @@ static int __meminit sparse_init_one_section(struct mem_section *ms,
return 1;
}
-__attribute__((weak)) __init
-void *alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
-{
- return NULL;
-}
-
static unsigned long usemap_size(void)
{
unsigned long size_bytes;
@@ -267,11 +261,6 @@ struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
if (map)
return map;
- map = alloc_bootmem_high_node(NODE_DATA(nid),
- sizeof(struct page) * PAGES_PER_SECTION);
- if (map)
- return map;
-
map = alloc_bootmem_node(NODE_DATA(nid),
sizeof(struct page) * PAGES_PER_SECTION);
return map;
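
The sparse.c hunks drop the alloc_bootmem_high_node() step from the allocation fallback chain along with its weak default definition; once no caller remains, the weak stub is dead code. For reference, the weak-symbol fallback idiom being removed works as below (generic userspace example with hypothetical names, not kernel code):

#include <stdlib.h>

/*
 * Weak default: used only if no other object file supplies a strong
 * definition of arch_special_alloc().  An architecture that wants the
 * hook simply defines the function; the linker prefers its version.
 */
__attribute__((weak)) void *arch_special_alloc(unsigned long size)
{
	return NULL;
}

void *alloc_with_fallback(unsigned long size)
{
	void *p = arch_special_alloc(size);	/* arch override, if any */

	if (p)
		return p;
	return malloc(size);			/* generic fallback path */
}
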