author    Pavel Tatashin <pasha.tatashin@oracle.com>	2017-05-31 11:25:23 -0400
committer David S. Miller <davem@davemloft.net>	2017-06-06 13:45:29 -0700
commit    7a5b4bbf49fe86ce77488a70c5dccfe2d50d7a2d (patch)
tree      d53db5c2f4b5cd9a675a1b3f85fe370ec8a672b5 /arch
parent    sparc64: redefine first version (diff)
sparc64: add per-cpu mm of secondary contexts
The new wrap is going to use information from this array to figure out mm's that
currently have valid secondary contexts set up.

Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
Reviewed-by: Bob Picco <bob.picco@oracle.com>
Reviewed-by: Steven Sistare <steven.sistare@oracle.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
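[Editor's note] The message only hints at how the array will be consumed. Purely as an illustration, and not part of this patch, the context-wrap path reworked later in this series could walk per_cpu_secondary_mm roughly as sketched below, bumping the version only for mm's that are actually live in some CPU's secondary context. The function name and the exact bookkeeping are hypothetical; the simplified sketch also omits resetting mmu_context_bmap and the locking done by the real allocator.

/*
 * Illustrative sketch only (not the actual kernel code): on context-version
 * wrap, instead of forcing every mm through get_new_mmu_context(), walk the
 * per-cpu array of secondary mms and carry just those forward to the new
 * version.  Callers are assumed to hold ctx_alloc_lock.
 */
static void mmu_context_wrap_sketch(void)
{
	unsigned long old_ver = tlb_context_cache & CTX_VERSION_MASK;
	unsigned long new_ver, new_ctx, old_ctx;
	struct mm_struct *mm;
	int cpu;

	/* Start a fresh version; every context handed out so far is stale. */
	new_ver = old_ver + CTX_FIRST_VERSION;
	if (unlikely(new_ver == 0))
		new_ver = CTX_FIRST_VERSION;
	tlb_context_cache = new_ver;

	/*
	 * Only mms currently loaded in some CPU's secondary context need
	 * their version bumped; anything else will fall into
	 * get_new_mmu_context() on its next switch_mm().
	 */
	for_each_online_cpu(cpu) {
		mm = per_cpu(per_cpu_secondary_mm, cpu);
		if (unlikely(!mm || mm == &init_mm))
			continue;

		old_ctx = mm->context.sparc64_ctx_val;
		if ((old_ctx & CTX_VERSION_MASK) == old_ver) {
			new_ctx = (old_ctx & ~CTX_VERSION_MASK) | new_ver;
			mm->context.sparc64_ctx_val = new_ctx;
		}
	}
}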
Diffstat (limited to 'arch')
-rw-r--r--  arch/sparc/include/asm/mmu_context_64.h | 5
-rw-r--r--  arch/sparc/mm/init_64.c                 | 1
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 734a1343d77d..edb45247bfa9 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -19,6 +19,7 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
+DECLARE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm);
 void get_new_mmu_context(struct mm_struct *mm);
 #ifdef CONFIG_SMP
 void smp_new_mmu_context_version(void);
@@ -76,8 +77,9 @@ void __flush_tlb_mm(unsigned long, unsigned long);
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long ctx_valid, flags;
-	int cpu;
+	int cpu = smp_processor_id();
 
+	per_cpu(per_cpu_secondary_mm, cpu) = mm;
 	if (unlikely(mm == &init_mm))
 		return;
 
@@ -123,7 +125,6 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * for the first time, we must flush that context out of the
 	 * local TLB.
 	 */
-	cpu = smp_processor_id();
 	if (!ctx_valid || !cpumask_test_cpu(cpu, mm_cpumask(mm))) {
 		cpumask_set_cpu(cpu, mm_cpumask(mm));
 		__flush_tlb_mm(CTX_HWBITS(mm->context),
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 63b50447bb62..a4c0bc8af820 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -711,6 +711,7 @@ unsigned long tlb_context_cache = CTX_FIRST_VERSION;
 #define MAX_CTX_NR	(1UL << CTX_NR_BITS)
 #define CTX_BMAP_SLOTS	BITS_TO_LONGS(MAX_CTX_NR)
 DECLARE_BITMAP(mmu_context_bmap, MAX_CTX_NR);
+DEFINE_PER_CPU(struct mm_struct *, per_cpu_secondary_mm) = {0};
 
 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.