From edbe7d23b4482e7f33179290bcff3b1feae1c5f3 Mon Sep 17 00:00:00 2001 From: Yinghai Lu Date: Wed, 25 Aug 2010 13:39:16 -0700 Subject: memblock: Add find_memory_core_early() Find a free range according to the node ranges in early_node_map[], using __memblock_find_in_range(). It will be used by memblock_x86_find_in_range_node(), which in turn will be used to find the right buffer for NODE_DATA. Signed-off-by: Yinghai Lu Signed-off-by: H. Peter Anvin --- include/linux/mm.h | 2 ++ 1 file changed, 2 insertions(+) (limited to 'include/linux/mm.h') diff --git a/include/linux/mm.h b/include/linux/mm.h index a2b48041b910..993e85f0afcb 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1164,6 +1164,8 @@ extern void free_bootmem_with_active_regions(int nid, unsigned long max_low_pfn); int add_from_early_node_map(struct range *range, int az, int nr_range, int nid); +u64 __init find_memory_core_early(int nid, u64 size, u64 align, + u64 goal, u64 limit); void *__alloc_memory_core_early(int nodeid, u64 size, u64 align, u64 goal, u64 limit); typedef int (*work_fn_t)(unsigned long, unsigned long, void *); -- cgit v1.2.3-59-g8ed1b From aa50d3a7aa8147b9e14dc9d5972a5d2359db4ef8 Mon Sep 17 00:00:00 2001 From: Andi Kleen Date: Wed, 6 Oct 2010 21:45:00 +0200 Subject: Encode huge page size for VM_FAULT_HWPOISON errors This fixes a problem introduced with the hugetlb hwpoison handling. The user space SIGBUS signalling wants to know the size of the hugepage that caused a HWPOISON fault. Unfortunately, the architecture page fault handlers do not have easy access to the struct page. Pass the information out in the fault error code instead. I added a separate VM_FAULT_HWPOISON_LARGE bit for this case and encoded the hpage index in some free upper bits of the fault code. The small page hwpoison case keeps the VM_FAULT_HWPOISON name to minimize changes. Also add code to hugetlb.h to convert that index into a page shift. It will be used in a further patch. Cc: Naoya Horiguchi Cc: fengguang.wu@intel.com Signed-off-by: Andi Kleen --- include/linux/hugetlb.h | 6 ++++++ include/linux/mm.h | 12 ++++++++++-- mm/hugetlb.c | 6 ++++-- mm/memory.c | 3 ++- 4 files changed, 22 insertions(+), 5 deletions(-) (limited to 'include/linux/mm.h') diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 796f30e00806..943c76b3d4bb 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -307,6 +307,11 @@ static inline struct hstate *page_hstate(struct page *page) return size_to_hstate(PAGE_SIZE << compound_order(page)); } +static inline unsigned hstate_index_to_shift(unsigned index) +{ + return hstates[index].order + PAGE_SHIFT; +} + #else struct hstate {}; #define alloc_huge_page_node(h, nid) NULL @@ -324,6 +329,7 @@ static inline unsigned int pages_per_huge_page(struct hstate *h) { return 1; } +#define hstate_index_to_shift(index) 0 #endif #endif /* _LINUX_HUGETLB_H */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 74949fbef8c6..f7e9efc6720b 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -718,12 +718,20 @@ static inline int page_mapped(struct page *page) #define VM_FAULT_SIGBUS 0x0002 #define VM_FAULT_MAJOR 0x0004 #define VM_FAULT_WRITE 0x0008 /* Special case for get_user_pages */ -#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned page */ +#define VM_FAULT_HWPOISON 0x0010 /* Hit poisoned small page */ +#define VM_FAULT_HWPOISON_LARGE 0x0020 /* Hit poisoned large page. 
Index encoded in upper bits */ #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ -#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON) +#define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ + +#define VM_FAULT_ERROR (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \ + VM_FAULT_HWPOISON_LARGE) + +/* Encode hstate index for a hwpoisoned large page */ +#define VM_FAULT_SET_HINDEX(x) ((x) << 12) +#define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf) /* * Can be called by the pagefault handler when it gets a VM_FAULT_OOM. diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 67cd03239b75..96991ded82fe 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -2589,7 +2589,8 @@ retry: * So we need to block hugepage fault by PG_hwpoison bit check. */ if (unlikely(PageHWPoison(page))) { - ret = VM_FAULT_HWPOISON; + ret = VM_FAULT_HWPOISON | + VM_FAULT_SET_HINDEX(h - hstates); goto backout_unlocked; } page_dup_rmap(page); @@ -2656,7 +2657,8 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, migration_entry_wait(mm, (pmd_t *)ptep, address); return 0; } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) - return VM_FAULT_HWPOISON; + return VM_FAULT_HWPOISON_LARGE | + VM_FAULT_SET_HINDEX(h - hstates); } ptep = huge_pte_alloc(mm, address, huge_page_size(h)); diff --git a/mm/memory.c b/mm/memory.c index 0e18b4d649ec..c2d6dd315659 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1450,7 +1450,8 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm, if (ret & VM_FAULT_OOM) return i ? i : -ENOMEM; if (ret & - (VM_FAULT_HWPOISON|VM_FAULT_SIGBUS)) + (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE| + VM_FAULT_SIGBUS)) return i ? i : -EFAULT; BUG(); } -- cgit v1.2.3-59-g8ed1b From f629d1c9bd0dbc44a6c4f9a4a67d1646c42bfc6f Mon Sep 17 00:00:00 2001 From: Michael Rubin Date: Tue, 26 Oct 2010 14:21:33 -0700 Subject: mm: add account_page_writeback() To help developers and applications gain visibility into writeback behaviour this patch adds two counters to /proc/vmstat. # grep nr_dirtied /proc/vmstat nr_dirtied 3747 # grep nr_written /proc/vmstat nr_written 3618 These entries allow user apps to understand writeback behaviour over time and learn how it is impacting their performance. Currently there is no way to inspect dirty and writeback speed over time; nr_dirty/nr_writeback cannot show that. These entries are necessary to give visibility into writeback behaviour. We have /proc/diskstats which lets us understand the io in the block layer. We have blktrace for more in-depth understanding. We have e2fsprogs and debugfs to give insight into the file system's behaviour, but we don't offer our users the ability to understand what writeback is doing. There is no way to know how active it is over the whole system, whether it's falling behind, or to quantify its efforts. With these values exported, users can easily see how much data applications are sending through writeback and also at what rates writeback is processing this data. Comparing the rates of change between the two allows developers to see when writeback is not able to keep up with incoming traffic and the rate of dirty memory being sent to the IO back end. This allows folks to understand their io workloads and track kernel issues. Non-kernel engineers at Google often use these counters to solve puzzling performance problems. 
Patch #4 adds a per-node vmstat file with nr_dirtied and nr_written. Patch #5 adds writeback thresholds to /proc/vmstat. Currently these values are in debugfs. But they should be promoted to /proc since they are useful for developers who are writing databases and file servers and are not debugging the kernel. The output is as below: # grep threshold /proc/vmstat nr_pages_dirty_threshold 409111 nr_pages_dirty_background_threshold 818223 This patch: This allows code outside of the mm core to safely manipulate page writeback state and not worry about the other accounting. Not using these routines means that some code will lose track of the accounting and we get bugs. Modify nilfs2 to use the interface. Signed-off-by: Michael Rubin Reviewed-by: KOSAKI Motohiro Reviewed-by: Wu Fengguang Cc: KONISHI Ryusuke Cc: Jiro SEKIBA Cc: Dave Chinner Cc: Jens Axboe Cc: KOSAKI Motohiro Cc: Nick Piggin Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- fs/nilfs2/segment.c | 2 +- include/linux/mm.h | 1 + mm/page-writeback.c | 13 ++++++++++++- 3 files changed, 14 insertions(+), 2 deletions(-) (limited to 'include/linux/mm.h') diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c index d926af626177..687d090cea34 100644 --- a/fs/nilfs2/segment.c +++ b/fs/nilfs2/segment.c @@ -1609,7 +1609,7 @@ nilfs_copy_replace_page_buffers(struct page *page, struct list_head *out) kunmap_atomic(kaddr, KM_USER0); if (!TestSetPageWriteback(clone_page)) - inc_zone_page_state(clone_page, NR_WRITEBACK); + account_page_writeback(clone_page); unlock_page(clone_page); return 0; diff --git a/include/linux/mm.h b/include/linux/mm.h index a4c66846fb8f..c36297faf7cb 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -868,6 +868,7 @@ int __set_page_dirty_no_writeback(struct page *page); int redirty_page_for_writepage(struct writeback_control *wbc, struct page *page); void account_page_dirtied(struct page *page, struct address_space *mapping); +void account_page_writeback(struct page *page); int set_page_dirty(struct page *page); int set_page_dirty_lock(struct page *page); int clear_page_dirty_for_io(struct page *page); diff --git a/mm/page-writeback.c b/mm/page-writeback.c index e3bccac1f025..94159819a651 100644 --- a/mm/page-writeback.c +++ b/mm/page-writeback.c @@ -1128,6 +1128,17 @@ void account_page_dirtied(struct page *page, struct address_space *mapping) } EXPORT_SYMBOL(account_page_dirtied); +/* + * Helper function for set_page_writeback family. + * NOTE: Unlike account_page_dirtied this does not rely on being atomic + * wrt interrupts. + */ +void account_page_writeback(struct page *page) +{ + inc_zone_page_state(page, NR_WRITEBACK); +} +EXPORT_SYMBOL(account_page_writeback); + /* * For address_spaces which do not use buffers. Just tag the page as dirty in * its radix tree. @@ -1366,7 +1377,7 @@ int test_set_page_writeback(struct page *page) ret = TestSetPageWriteback(page); } if (!ret) - inc_zone_page_state(page, NR_WRITEBACK); + account_page_writeback(page); return ret; } -- cgit v1.2.3-59-g8ed1b From bce54bbfde07e8b300f39dae14756c12a6ceca65 Mon Sep 17 00:00:00 2001 From: Will Deacon Date: Tue, 26 Oct 2010 14:21:37 -0700 Subject: mm: fix typo in mm.h when NODE_NOT_IN_PAGE_FLAGS NODE_NOT_IN_PAGE_FLAGS is defined in mm.h when the node information is not stored in the page flags bitmap. Unfortunately, there's a typo in one of the checks for it. This patch fixes it (s/NODE_NOT_IN_PAGEFLAGS/NODE_NOT_IN_PAGE_FLAGS/). Since this has been around for ages, I doubt it's been causing any serious problems. 
Signed-off-by: Will Deacon Cc: Christoph Lameter Cc: Mel Gorman Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) (limited to 'include/linux/mm.h') diff --git a/include/linux/mm.h b/include/linux/mm.h index c36297faf7cb..2862009f9573 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -497,8 +497,8 @@ static inline void set_compound_order(struct page *page, unsigned long order) #define NODES_PGSHIFT (NODES_PGOFF * (NODES_WIDTH != 0)) #define ZONES_PGSHIFT (ZONES_PGOFF * (ZONES_WIDTH != 0)) -/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allcator */ -#ifdef NODE_NOT_IN_PAGEFLAGS +/* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */ +#ifdef NODE_NOT_IN_PAGE_FLAGS #define ZONEID_SHIFT (SECTIONS_SHIFT + ZONES_SHIFT) #define ZONEID_PGOFF ((SECTIONS_PGOFF < ZONES_PGOFF)? \ SECTIONS_PGOFF : ZONES_PGOFF) -- cgit v1.2.3-59-g8ed1b From d065bd810b6deb67d4897a14bfe21f8eb526ba99 Mon Sep 17 00:00:00 2001 From: Michel Lespinasse Date: Tue, 26 Oct 2010 14:21:57 -0700 Subject: mm: retry page fault when blocking on disk transfer This change reduces mmap_sem hold times that are caused by waiting for disk transfers when accessing file mapped VMAs. It introduces the VM_FAULT_ALLOW_RETRY flag, which indicates that the call site wants mmap_sem to be released if blocking on a pending disk transfer. In that case, filemap_fault() returns the VM_FAULT_RETRY status bit and do_page_fault() will then re-acquire mmap_sem and retry the page fault. It is expected that the retry will hit the same page which will now be cached, and thus it will complete with a low mmap_sem hold time. Tests: - microbenchmark: thread A mmaps a large file and does random read accesses to the mmaped area - achieves about 55 iterations/s. Thread B does mmap/munmap in a loop at a separate location - achieves 55 iterations/s before, 15000 iterations/s after. - We are seeing related effects in some applications in house, which show significant performance regressions when running without this change. [akpm@linux-foundation.org: fix warning & crash] Signed-off-by: Michel Lespinasse Acked-by: Rik van Riel Acked-by: Linus Torvalds Cc: Nick Piggin Reviewed-by: Wu Fengguang Cc: Ying Han Cc: Peter Zijlstra Cc: Ingo Molnar Cc: Thomas Gleixner Acked-by: "H. Peter Anvin" Cc: Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- arch/x86/mm/fault.c | 38 ++++++++++++++++++++++++++------------ include/linux/mm.h | 2 ++ include/linux/pagemap.h | 13 +++++++++++++ mm/filemap.c | 16 +++++++++++++++- mm/memory.c | 10 ++++++++-- 5 files changed, 64 insertions(+), 15 deletions(-) (limited to 'include/linux/mm.h') diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 852b319edbdc..9b2345c9e0c3 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -956,8 +956,10 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) struct task_struct *tsk; unsigned long address; struct mm_struct *mm; - int write; int fault; + int write = error_code & PF_WRITE; + unsigned int flags = FAULT_FLAG_ALLOW_RETRY | + (write ? FAULT_FLAG_WRITE : 0); tsk = current; mm = tsk->mm; @@ -1068,6 +1070,7 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) bad_area_nosemaphore(regs, error_code, address); return; } +retry: down_read(&mm->mmap_sem); } else { /* @@ -1111,8 +1114,6 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code) * we can handle it.. 
*/ good_area: - write = error_code & PF_WRITE; - if (unlikely(access_error(error_code, write, vma))) { bad_area_access_error(regs, error_code, address); return; @@ -1123,21 +1124,34 @@ good_area: * make sure we exit gracefully rather than endlessly redo * the fault: */ - fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0); + fault = handle_mm_fault(mm, vma, address, flags); if (unlikely(fault & VM_FAULT_ERROR)) { mm_fault_error(regs, error_code, address, fault); return; } - if (fault & VM_FAULT_MAJOR) { - tsk->maj_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, - regs, address); - } else { - tsk->min_flt++; - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, - regs, address); + /* + * Major/minor page fault accounting is only done on the + * initial attempt. If we go through a retry, it is extremely + * likely that the page will be found in page cache at that point. + */ + if (flags & FAULT_FLAG_ALLOW_RETRY) { + if (fault & VM_FAULT_MAJOR) { + tsk->maj_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, + regs, address); + } else { + tsk->min_flt++; + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, + regs, address); + } + if (fault & VM_FAULT_RETRY) { + /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk + * of starvation. */ + flags &= ~FAULT_FLAG_ALLOW_RETRY; + goto retry; + } } check_v8086_mode(regs, address, tsk); diff --git a/include/linux/mm.h b/include/linux/mm.h index 2862009f9573..3bf46655b50a 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -144,6 +144,7 @@ extern pgprot_t protection_map[16]; #define FAULT_FLAG_WRITE 0x01 /* Fault was a write access */ #define FAULT_FLAG_NONLINEAR 0x02 /* Fault was via a nonlinear mapping */ #define FAULT_FLAG_MKWRITE 0x04 /* Fault was mkwrite of existing pte */ +#define FAULT_FLAG_ALLOW_RETRY 0x08 /* Retry fault if blocking */ /* * This interface is used by x86 PAT code to identify a pfn mapping that is @@ -723,6 +724,7 @@ static inline int page_mapped(struct page *page) #define VM_FAULT_NOPAGE 0x0100 /* ->fault installed the pte, not return page */ #define VM_FAULT_LOCKED 0x0200 /* ->fault locked the returned page */ +#define VM_FAULT_RETRY 0x0400 /* ->fault blocked, must retry */ #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */ diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h index e12cdc6d79ee..2d1ffe3cf1ee 100644 --- a/include/linux/pagemap.h +++ b/include/linux/pagemap.h @@ -299,6 +299,8 @@ static inline pgoff_t linear_page_index(struct vm_area_struct *vma, extern void __lock_page(struct page *page); extern int __lock_page_killable(struct page *page); extern void __lock_page_nosync(struct page *page); +extern int __lock_page_or_retry(struct page *page, struct mm_struct *mm, + unsigned int flags); extern void unlock_page(struct page *page); static inline void __set_page_locked(struct page *page) @@ -350,6 +352,17 @@ static inline void lock_page_nosync(struct page *page) __lock_page_nosync(page); } +/* + * lock_page_or_retry - Lock the page, unless this would block and the + * caller indicated that it can handle a retry. + */ +static inline int lock_page_or_retry(struct page *page, struct mm_struct *mm, + unsigned int flags) +{ + might_sleep(); + return trylock_page(page) || __lock_page_or_retry(page, mm, flags); +} + /* * This is exported only for wait_on_page_locked/wait_on_page_writeback. * Never use this directly! 
diff --git a/mm/filemap.c b/mm/filemap.c index 8ed709a83eb7..33f81252a744 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -612,6 +612,19 @@ void __lock_page_nosync(struct page *page) TASK_UNINTERRUPTIBLE); } +int __lock_page_or_retry(struct page *page, struct mm_struct *mm, + unsigned int flags) +{ + if (!(flags & FAULT_FLAG_ALLOW_RETRY)) { + __lock_page(page); + return 1; + } else { + up_read(&mm->mmap_sem); + wait_on_page_locked(page); + return 0; + } +} + /** * find_get_page - find and get a page reference * @mapping: the address_space to search @@ -1550,7 +1563,8 @@ retry_find: goto no_cached_page; } - lock_page(page); + if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) + return ret | VM_FAULT_RETRY; /* Did it get truncated? */ if (unlikely(page->mapping != mapping)) { diff --git a/mm/memory.c b/mm/memory.c index 92cc54e94137..714c4438d887 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2627,6 +2627,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, struct page *page, *swapcache = NULL; swp_entry_t entry; pte_t pte; + int locked; struct mem_cgroup *ptr = NULL; int exclusive = 0; int ret = 0; @@ -2677,8 +2678,12 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, goto out_release; } - lock_page(page); + locked = lock_page_or_retry(page, mm, flags); delayacct_clear_flag(DELAYACCT_PF_SWAPIN); + if (!locked) { + ret |= VM_FAULT_RETRY; + goto out_release; + } /* * Make sure try_to_free_swap or reuse_swap_page or swapoff did not @@ -2927,7 +2932,8 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma, vmf.page = NULL; ret = vma->vm_ops->fault(vma, &vmf); - if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE))) + if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | + VM_FAULT_RETRY))) return ret; if (unlikely(PageHWPoison(vmf.page))) { -- cgit v1.2.3-59-g8ed1b From 25ca1d6c02fe1c6d90d918867ef670d323725458 Mon Sep 17 00:00:00 2001 From: Namhyung Kim Date: Tue, 26 Oct 2010 14:21:59 -0700 Subject: mm: wrap get_locked_pte() using __cond_lock() The get_locked_pte() conditionally grabs 'ptl' in case of returning non-NULL. This leads sparse to complain about context imbalance. Rename and wrap it using __cond_lock() to make sparse happy. 
Signed-off-by: Namhyung Kim Signed-off-by: Andrew Morton Signed-off-by: Linus Torvalds --- include/linux/mm.h | 10 +++++++++- mm/memory.c | 2 +- 2 files changed, 10 insertions(+), 2 deletions(-) (limited to 'include/linux/mm.h') diff --git a/include/linux/mm.h b/include/linux/mm.h index 3bf46655b50a..721f451c3029 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1034,7 +1034,15 @@ extern void unregister_shrinker(struct shrinker *); int vma_wants_writenotify(struct vm_area_struct *vma); -extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl); +extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, + spinlock_t **ptl); +static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, + spinlock_t **ptl) +{ + pte_t *ptep; + __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl)); + return ptep; +} #ifdef __PAGETABLE_PUD_FOLDED static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, diff --git a/mm/memory.c b/mm/memory.c index 714c4438d887..4ce24a4d5d48 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -1591,7 +1591,7 @@ struct page *get_dump_page(unsigned long addr) } #endif /* CONFIG_ELF_CORE */ -pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, +pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl) { pgd_t * pgd = pgd_offset(mm, addr); -- cgit v1.2.3-59-g8ed1b
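For reference, below is a minimal sketch of how an architecture fault handler could consume the hugepage-size encoding added by the VM_FAULT_HWPOISON_LARGE patch above when raising SIGBUS. It is an illustration only: the helper name do_sigbus_hwpoison() is made up, and the exact siginfo handling shown here is an assumption, not code taken from the patches in this log.

/*
 * Hypothetical consumer of VM_FAULT_HWPOISON{,_LARGE}: report to user space
 * how much memory around the faulting address is poisoned. The hstate index
 * stored with VM_FAULT_SET_HINDEX() is turned back into a page shift using
 * hstate_index_to_shift() from include/linux/hugetlb.h.
 */
static void do_sigbus_hwpoison(unsigned long address, unsigned int fault)
{
	siginfo_t info;
	unsigned lsb = 0;

	if (fault & VM_FAULT_HWPOISON_LARGE)
		lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault));
	if (fault & VM_FAULT_HWPOISON)
		lsb = PAGE_SHIFT;

	info.si_signo    = SIGBUS;
	info.si_errno    = 0;
	info.si_code     = BUS_MCEERR_AR;	/* action required: poisoned page */
	info.si_addr     = (void __user *)address;
	info.si_addr_lsb = lsb;			/* log2 of the poisoned extent */
	force_sig_info(SIGBUS, &info, current);
}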