path: root/arch/sparc/include
author	David S. Miller <davem@davemloft.net>	2017-08-07 10:42:09 -0700
committer	David S. Miller <davem@davemloft.net>	2017-08-07 10:42:09 -0700
commit	fde6af4729b005dc9dc936b0ed9f1b27b5b2d0f4 (patch)
tree	fb52e170a7b2130b8a5266ac665d4314447fef95 /arch/sparc/include
parent	Merge branch 'net-sched-summer-cleanup-part-2-ndo_setup_tc' (diff)
parent	net/mlx5: Increase the maximum flow counters supported (diff)
Merge tag 'mlx5-shared-2017-08-07' of git://git.kernel.org/pub/scm/linux/kernel/git/mellanox/linux
Saeed Mahameed says:

====================
mlx5-shared-2017-08-07

This series includes some mlx5 updates for both the net-next and rdma trees.

From Saeed,
Core driver updates to allow selectively building the driver with or
without some large driver components, such as:
- E-Switch (Ethernet SRIOV support).
- Multi Physical Function Switch (MPFS) support.
For that, we split the E-Switch and MPFS functionalities into separate files.

From Erez,
Delay mlx5_core events while mlx5 interface registration (namely mlx5_ib)
is taking place, until it completes.

From Rabie,
Increase the maximum number of supported flow counters.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
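The "delay mlx5_core events" item describes a hold-and-replay pattern: events raised while an interface (such as mlx5_ib) is still registering are queued and only dispatched once registration has completed. The user-space sketch below only illustrates that queue-then-replay control flow; every name in it (pending_event, post_event, registration_complete, and so on) is hypothetical and is not the mlx5_core API, which additionally does this under its own locking.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical event record; the names here are illustrative only. */
struct pending_event {
	int type;
	struct pending_event *next;
};

static struct pending_event *pending_head, *pending_tail;
static bool registration_in_progress;

/* Stand-in for notifying a registered interface of an event. */
static void deliver_event(int type)
{
	printf("delivering event %d\n", type);
}

/* While registration is in flight, hold events back instead of delivering. */
static void post_event(int type)
{
	if (registration_in_progress) {
		struct pending_event *ev = malloc(sizeof(*ev));

		if (!ev)
			return;
		ev->type = type;
		ev->next = NULL;
		if (pending_tail)
			pending_tail->next = ev;
		else
			pending_head = ev;
		pending_tail = ev;
		return;
	}
	deliver_event(type);
}

/* Once registration completes, replay the held-back events in order. */
static void registration_complete(void)
{
	registration_in_progress = false;
	while (pending_head) {
		struct pending_event *ev = pending_head;

		pending_head = ev->next;
		deliver_event(ev->type);
		free(ev);
	}
	pending_tail = NULL;
}

int main(void)
{
	registration_in_progress = true;
	post_event(1);			/* queued */
	post_event(2);			/* queued */
	registration_complete();	/* both delivered here, in order */
	post_event(3);			/* delivered immediately */
	return 0;
}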
Diffstat (limited to 'arch/sparc/include')
-rw-r--r--	arch/sparc/include/asm/mmu_context_64.h	14
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/arch/sparc/include/asm/mmu_context_64.h b/arch/sparc/include/asm/mmu_context_64.h
index 2cddcda4f85f..87841d687f8d 100644
--- a/arch/sparc/include/asm/mmu_context_64.h
+++ b/arch/sparc/include/asm/mmu_context_64.h
@@ -27,9 +27,11 @@ void destroy_context(struct mm_struct *mm);
 void __tsb_context_switch(unsigned long pgd_pa,
 			  struct tsb_config *tsb_base,
 			  struct tsb_config *tsb_huge,
-			  unsigned long tsb_descr_pa);
+			  unsigned long tsb_descr_pa,
+			  unsigned long secondary_ctx);
 
-static inline void tsb_context_switch(struct mm_struct *mm)
+static inline void tsb_context_switch_ctx(struct mm_struct *mm,
+					  unsigned long ctx)
 {
 	__tsb_context_switch(__pa(mm->pgd),
 			     &mm->context.tsb_block[MM_TSB_BASE],
@@ -40,9 +42,12 @@ static inline void tsb_context_switch(struct mm_struct *mm)
 #else
 			     NULL
 #endif
-			     , __pa(&mm->context.tsb_descr[MM_TSB_BASE]));
+			     , __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
+			     ctx);
 }
 
+#define tsb_context_switch(X) tsb_context_switch_ctx(X, 0)
+
 void tsb_grow(struct mm_struct *mm,
 	      unsigned long tsb_index,
 	      unsigned long mm_rss);
@@ -112,8 +117,7 @@ static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, str
 	 * cpu0 to update it's TSB because at that point the cpu_vm_mask
 	 * only had cpu1 set in it.
 	 */
-	load_secondary_context(mm);
-	tsb_context_switch(mm);
+	tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context));
 
 	/* Any time a processor runs a context on an address space
 	 * for the first time, we must flush that context out of the
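For readability, the sparc64 hunks above can be read back as plain code: __tsb_context_switch() gains a secondary_ctx argument, tsb_context_switch_ctx() forwards a caller-supplied context value, and the old tsb_context_switch() name becomes a ctx == 0 shorthand. The sketch below is reconstructed from this diff alone; the huge-page TSB argument chosen between the first two hunks is elided and shown as NULL.

/* Reconstructed from the hunks above; not the complete header. */
void __tsb_context_switch(unsigned long pgd_pa,
			  struct tsb_config *tsb_base,
			  struct tsb_config *tsb_huge,
			  unsigned long tsb_descr_pa,
			  unsigned long secondary_ctx);

static inline void tsb_context_switch_ctx(struct mm_struct *mm,
					  unsigned long ctx)
{
	__tsb_context_switch(__pa(mm->pgd),
			     &mm->context.tsb_block[MM_TSB_BASE],
			     NULL,	/* huge-page TSB block elided in the hunks shown */
			     __pa(&mm->context.tsb_descr[MM_TSB_BASE]),
			     ctx);
}

/* The old entry point survives as a "no secondary context" shorthand. */
#define tsb_context_switch(X)	tsb_context_switch_ctx(X, 0)

The last hunk then replaces the separate load_secondary_context(mm) / tsb_context_switch(mm) pair in switch_mm() with a single tsb_context_switch_ctx(mm, CTX_HWBITS(mm->context)) call, so the TSB switch path receives the same context value that is being installed.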