author      Hugh Dickins <hugh@veritas.com>          2005-10-29 18:16:27 -0700
committer   Linus Torvalds <torvalds@g5.osdl.org>    2005-10-29 21:40:40 -0700
commit      705e87c0c3c38424f7f30556c85bc20e808d2f59 (patch)
tree        7a237e6266f4801385e1226cc497b47e3a2458bd /mm/mempolicy.c
parent      [PATCH] mm: page fault handler locking (diff)
download    linux-dev-705e87c0c3c38424f7f30556c85bc20e808d2f59.tar.xz
            linux-dev-705e87c0c3c38424f7f30556c85bc20e808d2f59.zip
[PATCH] mm: pte_offset_map_lock loops
Convert those common loops which take page_table_lock on the outside and use pte_offset_map within, to use just pte_offset_map_lock within instead.

These all hold mmap_sem (some exclusively, some not), so at no level can a page table be whipped away from beneath them. But whereas the pte_alloc loops tested with the "atomic" pmd_present, these loops are testing with pmd_none, which on i386 PAE tests both the lower and upper halves.

That's now unsafe, so add a cast into pmd_none to test only the vital lower half: we lose a little sensitivity to a corrupt middle directory, but not enough to worry about. It appears that i386 and UML were the only architectures vulnerable in this way; pgd and pud are no problem.

Signed-off-by: Hugh Dickins <hugh@veritas.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
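The pmd_none change the message describes lands in the i386 PAE headers, outside this file's diff. As a minimal sketch of that change, assuming the era's macro-based definition (the exact header text may differ):

	/* Sketch: on i386 PAE, pmd_val(x) is a 64-bit value read in two
	 * halves.  Casting to 32-bit unsigned long makes pmd_none test
	 * only the lower half, which is read in a single atomic word
	 * access and carries the bits that matter for emptiness. */
	#define pmd_none(x)	(!(unsigned long)pmd_val(x))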
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--    mm/mempolicy.c    7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 11d824f282f1..902d4c9eccdc 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -228,9 +228,9 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 {
 	pte_t *orig_pte;
 	pte_t *pte;
+	spinlock_t *ptl;
 
-	spin_lock(&vma->vm_mm->page_table_lock);
-	orig_pte = pte = pte_offset_map(pmd, addr);
+	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
 	do {
 		unsigned long pfn;
 		unsigned int nid;
@@ -246,8 +246,7 @@ static int check_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		nid = pfn_to_nid(pfn);
 		if (!node_isset(nid, *nodes))
 			break;
 	} while (pte++, addr += PAGE_SIZE, addr != end);
-	pte_unmap(orig_pte);
-	spin_unlock(&vma->vm_mm->page_table_lock);
+	pte_unmap_unlock(orig_pte, ptl);
 	return addr != end;
 }
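The same conversion recurs across the other walkers this commit touches. A hedged sketch of its general shape, using a hypothetical walk_ptes() helper (the name and loop body are illustrative, not taken from this diff; pte_offset_map_lock and pte_unmap_unlock are the kernel APIs this series introduces):

	/* Hypothetical PTE walker showing the converted locking. */
	static int walk_ptes(struct mm_struct *mm, pmd_t *pmd,
			unsigned long addr, unsigned long end)
	{
		spinlock_t *ptl;
		pte_t *orig_pte;
		pte_t *pte;

		/* Map the page table and take whichever spinlock guards
		 * it; the lock comes back through *ptl instead of being
		 * assumed to be mm->page_table_lock. */
		orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
		do {
			if (pte_none(*pte))
				continue;
			/* ...examine or update *pte under the lock... */
		} while (pte++, addr += PAGE_SIZE, addr != end);
		/* Unmap and drop the lock together. */
		pte_unmap_unlock(orig_pte, ptl);
		return addr != end;
	}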