Diffstat (limited to 'arch/arm64/mm/mteswap.c')
-rw-r--r--  arch/arm64/mm/mteswap.c | 14 +++++---------
1 file changed, 5 insertions(+), 9 deletions(-)
diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c
index 7c4ef56265ee..bed803d8e158 100644
--- a/arch/arm64/mm/mteswap.c
+++ b/arch/arm64/mm/mteswap.c
@@ -12,7 +12,7 @@ static DEFINE_XARRAY(mte_pages);
 void *mte_allocate_tag_storage(void)
 {
 	/* tags granule is 16 bytes, 2 tags stored per byte */
-	return kmalloc(PAGE_SIZE / 16 / 2, GFP_KERNEL);
+	return kmalloc(MTE_PAGE_TAG_STORAGE, GFP_KERNEL);
 }
 
 void mte_free_tag_storage(char *storage)
@@ -53,16 +53,12 @@ bool mte_restore_tags(swp_entry_t entry, struct page *page)
 	if (!tags)
 		return false;
 
-	page_kasan_tag_reset(page);
 	/*
-	 * We need smp_wmb() in between setting the flags and clearing the
-	 * tags because if another thread reads page->flags and builds a
-	 * tagged address out of it, there is an actual dependency to the
-	 * memory access, but on the current thread we do not guarantee that
-	 * the new page->flags are visible before the tags were updated.
+	 * Test PG_mte_tagged again in case it was racing with another
+	 * set_pte_at().
 	 */
-	smp_wmb();
-	mte_restore_page_tags(page_address(page), tags);
+	if (!test_and_set_bit(PG_mte_tagged, &page->flags))
+		mte_restore_page_tags(page_address(page), tags);
 
 	return true;
 }
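
Note: the hunk above relies on an atomic test-and-set of the PG_mte_tagged page flag so that, when the swap-in restore path races with another set_pte_at(), only the thread that flips the bit restores the tags. The following userspace sketch illustrates that idiom only; it is not the kernel implementation, and the names fake_page, PG_TAGGED_BIT, restore_tags() and test_and_set_flag() are invented stand-ins for page, PG_mte_tagged, mte_restore_page_tags() and test_and_set_bit().

/*
 * Minimal sketch of the "only the winner of test-and-set initialises"
 * pattern, using C11 atomics in place of the kernel bitops.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <pthread.h>

#define PG_TAGGED_BIT 0		/* stand-in for PG_mte_tagged */

struct fake_page {
	atomic_ulong flags;	/* stand-in for page->flags */
};

static atomic_int restore_calls;	/* counts how many threads restored */

static void restore_tags(struct fake_page *page)
{
	/* stand-in for mte_restore_page_tags(); must run at most once */
	atomic_fetch_add(&restore_calls, 1);
}

/* analogue of test_and_set_bit(): returns the previous value of the bit */
static int test_and_set_flag(struct fake_page *page, int bit)
{
	unsigned long old = atomic_fetch_or(&page->flags, 1UL << bit);

	return (old >> bit) & 1;
}

static void *racer(void *arg)
{
	struct fake_page *page = arg;

	/* only the thread that flips the bit 0 -> 1 restores the tags */
	if (!test_and_set_flag(page, PG_TAGGED_BIT))
		restore_tags(page);
	return NULL;
}

int main(void)
{
	struct fake_page page = { .flags = 0 };
	pthread_t t1, t2;

	pthread_create(&t1, NULL, racer, &page);
	pthread_create(&t2, NULL, racer, &page);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	/* expected output: "restored 1 time(s)" regardless of who wins */
	printf("restored %d time(s)\n", atomic_load(&restore_calls));
	return 0;
}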