From c2fda5fed81eea077363b285b66eafce20dfd45a Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Fri, 22 Dec 2006 14:25:52 +0100
Subject: [PATCH] Fix up page_mkclean_one(): virtual caches, s390

 - add flush_cache_page() for all those virtually indexed cache
   architectures.

 - handle s390.

Signed-off-by: Peter Zijlstra
Signed-off-by: Linus Torvalds
---
 mm/rmap.c | 23 +++++++++++++----------
 1 file changed, 13 insertions(+), 10 deletions(-)

diff --git a/mm/rmap.c b/mm/rmap.c
index b3deda8b5019..57306fa0114d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -433,7 +433,7 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	unsigned long address;
-	pte_t *pte, entry;
+	pte_t *pte;
 	spinlock_t *ptl;
 	int ret = 0;
 
@@ -445,17 +445,18 @@ static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
 	if (!pte)
 		goto out;
 
-	if (!pte_dirty(*pte) && !pte_write(*pte))
-		goto unlock;
+	if (pte_dirty(*pte) || pte_write(*pte)) {
+		pte_t entry;
 
-	entry = ptep_get_and_clear(mm, address, pte);
-	entry = pte_mkclean(entry);
-	entry = pte_wrprotect(entry);
-	ptep_establish(vma, address, pte, entry);
-	lazy_mmu_prot_update(entry);
-	ret = 1;
+		flush_cache_page(vma, address, pte_pfn(*pte));
+		entry = ptep_clear_flush(vma, address, pte);
+		entry = pte_wrprotect(entry);
+		entry = pte_mkclean(entry);
+		set_pte_at(mm, address, pte, entry);
+		lazy_mmu_prot_update(entry);
+		ret = 1;
+	}
 
-unlock:
 	pte_unmap_unlock(pte, ptl);
 out:
 	return ret;
@@ -490,6 +491,8 @@ int page_mkclean(struct page *page)
 		if (mapping)
 			ret = page_mkclean_file(mapping, page);
 	}
+	if (page_test_and_clear_dirty(page))
+		ret = 1;
 
 	return ret;
 }
-- 
cgit v1.2.3-59-g8ed1b
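
For reference, a condensed sketch of how the two touched functions read once the
hunks above are applied. This is reconstructed from the diff, not a verbatim copy
of mm/rmap.c; lines outside the hunks are elided and summarized in comments.

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	/* ... look up address and pte (unchanged by this patch) ... */
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		/* write back the user mapping on virtually indexed caches */
		flush_cache_page(vma, address, pte_pfn(*pte));
		/* atomically clear the pte and flush the stale TLB entry */
		entry = ptep_clear_flush(vma, address, pte);
		/* re-install the pte write-protected and clean */
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		lazy_mmu_prot_update(entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	/* ... walk the file mappings via page_mkclean_file() (unchanged) ... */

	/* s390 tracks dirty state in the storage key rather than the pte */
	if (page_test_and_clear_dirty(page))
		ret = 1;

	return ret;
}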