// SPDX-License-Identifier: GPL-2.0

/*
 * MMU-generic set_memory implementation for powerpc
 *
 * Copyright 2019-2021, IBM Corporation.
 */

#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/set_memory.h>

#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>


/*
 * Updates the attributes of a page in three steps:
 *
 * 1. invalidate the page table entry
 * 2. flush the TLB
 * 3. install the new entry with the updated attributes
 *
 * Invalidating the pte means there are situations where this will not work
 * when in theory it should.
 * For example:
 * - removing write from a page whilst it is being executed
 * - setting a page read-only whilst it is being read by another CPU
 */
static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
{
	long action = (long)data;
	pte_t pte;

	spin_lock(&init_mm.page_table_lock);

	/* invalidate the PTE so it's safe to modify */
	pte = ptep_get_and_clear(&init_mm, addr, ptep);
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	/* modify the PTE bits as desired, then apply */
	switch (action) {
	case SET_MEMORY_RO:
		pte = pte_wrprotect(pte);
		break;
	case SET_MEMORY_RW:
		pte = pte_mkwrite(pte_mkdirty(pte));
		break;
	case SET_MEMORY_NX:
		pte = pte_exprotect(pte);
		break;
	case SET_MEMORY_X:
		pte = pte_mkexec(pte);
		break;
	default:
		WARN_ON_ONCE(1);
		break;
	}

	set_pte_at(&init_mm, addr, ptep, pte);

	/* See ptesync comment in radix__set_pte_at() */
	if (radix_enabled())
		asm volatile("ptesync": : :"memory");

	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

int change_memory_attr(unsigned long addr, int numpages, long action)
{
	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
	unsigned long size = numpages * PAGE_SIZE;

	if (!numpages)
		return 0;

	if (WARN_ON_ONCE(is_vmalloc_or_module_addr((void *)addr) &&
			 is_vm_area_hugepages((void *)addr)))
		return -EINVAL;

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * On hash, the linear mapping is not in the Linux page table so
	 * apply_to_existing_page_range() will have no effect. If in the future
	 * the set_memory_* functions are used on the linear map this will need
	 * to be updated.
	 */
	if (!radix_enabled()) {
		int region = get_region_id(addr);

		if (WARN_ON_ONCE(region != VMALLOC_REGION_ID &&
				 region != IO_REGION_ID))
			return -EINVAL;
	}
#endif

	return apply_to_existing_page_range(&init_mm, start, size,
					    change_page_attr, (void *)action);
}

/*
 * Set the attributes of a page:
 *
 * This function is used by PPC32 at the end of init to set final kernel memory
 * protection. It includes changing the mapping of the page it is executing from
 * and data pages it is using.
 */
static int set_page_attr(pte_t *ptep, unsigned long addr, void *data)
{
	pgprot_t prot = __pgprot((unsigned long)data);

	spin_lock(&init_mm.page_table_lock);

	set_pte_at(&init_mm, addr, ptep, pte_modify(*ptep, prot));
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);

	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

int set_memory_attr(unsigned long addr, int numpages, pgprot_t prot)
{
	unsigned long start = ALIGN_DOWN(addr, PAGE_SIZE);
	unsigned long sz = numpages * PAGE_SIZE;

	if (numpages <= 0)
		return 0;

	return apply_to_existing_page_range(&init_mm, start, sz, set_page_attr,
					    (void *)pgprot_val(prot));
}
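
/*
 * Usage sketch (illustrative, not part of this file): callers do not invoke
 * change_memory_attr() directly. The set_memory_*() helpers declared in
 * asm/set_memory.h are thin inline wrappers that pass the matching
 * SET_MEMORY_* action, roughly:
 *
 *	static inline int set_memory_ro(unsigned long addr, int numpages)
 *	{
 *		return change_memory_attr(addr, numpages, SET_MEMORY_RO);
 *	}
 *
 * so write-protecting a single page, e.g. after patching its contents,
 * looks like:
 *
 *	set_memory_ro((unsigned long)addr, 1);
 */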
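
/*
 * Usage sketch for the PPC32 path (an illustrative assumption based on the
 * mark_rodata_ro()-style callers this interface was written for): at the end
 * of init, final kernel text protection can be applied over a whole range
 * with set_memory_attr(), e.g.:
 *
 *	unsigned long numpages = PFN_UP((unsigned long)_etext) -
 *				 PFN_DOWN((unsigned long)_stext);
 *
 *	set_memory_attr((unsigned long)_stext, numpages, PAGE_KERNEL_ROX);
 *
 * which walks every PTE in [_stext, _etext) and rewrites its protection bits
 * via pte_modify(), including the mapping of the page the caller is
 * currently executing from.
 */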