path: root/arch/arm64/kernel/elfcore.c

// SPDX-License-Identifier: GPL-2.0-only

#include <linux/coredump.h>
#include <linux/elfcore.h>
#include <linux/kernel.h>
#include <linux/mm.h>

#include <asm/cpufeature.h>
#include <asm/mte.h>

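/*
 * Iterate over the MTE-enabled (VM_MTE) VMAs of the current mm. The
 * system_supports_mte() guard makes every helper below a no-op on
 * hardware without MTE.
 */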
#define for_each_mte_vma(vmi, vma)					\
	if (system_supports_mte())					\
		for_each_vma(vmi, vma)					\
			if (vma->vm_flags & VM_MTE)

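/*
 * Number of bytes of tag storage dumped for @vma: one 4-bit tag per
 * 16-byte granule, packed two tags per byte, i.e. MTE_PAGE_TAG_STORAGE
 * bytes per page. VM_DONTDUMP regions contribute no tag data.
 */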
static unsigned long mte_vma_tag_dump_size(struct vm_area_struct *vma)
{
	if (vma->vm_flags & VM_DONTDUMP)
		return 0;

	return vma_pages(vma) * MTE_PAGE_TAG_STORAGE;
}

/*
 * Derived from dump_user_range(); start/end must be page-aligned.
 * Returns 1 on success, 0 on failure (the dump_emit() convention).
 */
static int mte_dump_tag_range(struct coredump_params *cprm,
			      unsigned long start, unsigned long end)
{
	int ret = 1;
	unsigned long addr;
	void *tags = NULL;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page = get_dump_page(addr);

		/*
		 * get_dump_page() returns NULL when encountering an empty
		 * page table entry that would otherwise have been filled with
		 * the zero page. Skip the equivalent tag dump which would
		 * have been all zeros.
		 */
		if (!page) {
			dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
			continue;
		}

		/*
		 * Pages mapped in user space as !pte_access_permitted() (e.g.
		 * PROT_EXEC only) may not have the PG_mte_tagged flag set.
		 */
		if (!test_bit(PG_mte_tagged, &page->flags)) {
			put_page(page);
			dump_skip(cprm, MTE_PAGE_TAG_STORAGE);
			continue;
		}

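		/* Allocate the tag buffer lazily and reuse it across pages */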
		if (!tags) {
			tags = mte_allocate_tag_storage();
			if (!tags) {
				put_page(page);
				ret = 0;
				break;
			}
		}

		mte_save_page_tags(page_address(page), tags);
		put_page(page);
		if (!dump_emit(cprm, tags, MTE_PAGE_TAG_STORAGE)) {
			mte_free_tag_storage(tags);
			ret = 0;
			break;
		}
	}

	if (tags)
		mte_free_tag_storage(tags);

	return ret;
}

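/*
 * One extra PT_AARCH64_MEMTAG_MTE program header is emitted per
 * MTE-enabled VMA, so report that count here.
 */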
Elf_Half elf_core_extra_phdrs(void)
{
	struct vm_area_struct *vma;
	int vma_count = 0;
	VMA_ITERATOR(vmi, current->mm, 0);

	for_each_mte_vma(vmi, vma)
		vma_count++;

	return vma_count;
}

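/*
 * Write the PT_AARCH64_MEMTAG_MTE program headers. @offset is the file
 * offset where the first tag dump will land; p_filesz is the tag storage
 * size (zero for VM_DONTDUMP) while p_memsz covers the whole VMA.
 */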
int elf_core_write_extra_phdrs(struct coredump_params *cprm, loff_t offset)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, current->mm, 0);

	for_each_mte_vma(vmi, vma) {
		struct elf_phdr phdr;

		phdr.p_type = PT_AARCH64_MEMTAG_MTE;
		phdr.p_offset = offset;
		phdr.p_vaddr = vma->vm_start;
		phdr.p_paddr = 0;
		phdr.p_filesz = mte_vma_tag_dump_size(vma);
		phdr.p_memsz = vma->vm_end - vma->vm_start;
		phdr.p_flags = 0;
		phdr.p_align = 0;
		offset += phdr.p_filesz;

		if (!dump_emit(cprm, &phdr, sizeof(phdr)))
			return 0;
	}

	return 1;
}

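/*
 * Total size of the tag data that elf_core_write_extra_data() will emit,
 * used to reserve space in the core file.
 */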
size_t elf_core_extra_data_size(void)
{
	struct vm_area_struct *vma;
	size_t data_size = 0;
	VMA_ITERATOR(vmi, current->mm, 0);

	for_each_mte_vma(vmi, vma)
		data_size += mte_vma_tag_dump_size(vma);

	return data_size;
}

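/*
 * Dump the tags for each MTE VMA. VM_DONTDUMP regions were advertised
 * with p_filesz == 0, so skip them here as well to keep the file offsets
 * consistent with the program headers.
 */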
int elf_core_write_extra_data(struct coredump_params *cprm)
{
	struct vm_area_struct *vma;
	VMA_ITERATOR(vmi, current->mm, 0);

	for_each_mte_vma(vmi, vma) {
		if (vma->vm_flags & VM_DONTDUMP)
			continue;

		if (!mte_dump_tag_range(cprm, vma->vm_start, vma->vm_end))
			return 0;
	}

	return 1;
}