author    Ralf Baechle <ralf@linux-mips.org>    2006-04-05 09:45:47 +0100
committer Ralf Baechle <ralf@linux-mips.org>    2006-04-19 04:14:28 +0200
commit    f088fc84f94c1a36943e28ad704a9a740a35f877 (patch)
tree      309add2d3fe666920a681985c36d55f731df9922 /include
parent    [MIPS] MT: Improved multithreading support. (diff)
[MIPS] FPU affinity for MT ASE.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'include')
-rw-r--r--  include/asm-mips/cpu-features.h |  2
-rw-r--r--  include/asm-mips/fpu.h          |  4
-rw-r--r--  include/asm-mips/processor.h    | 16
-rw-r--r--  include/asm-mips/system.h       | 32
4 files changed, 53 insertions(+), 1 deletion(-)
diff --git a/include/asm-mips/cpu-features.h b/include/asm-mips/cpu-features.h
index 3f2b6d9ac45e..254e11ed247b 100644
--- a/include/asm-mips/cpu-features.h
+++ b/include/asm-mips/cpu-features.h
@@ -40,7 +40,7 @@
#define cpu_has_sb1_cache (cpu_data[0].options & MIPS_CPU_SB1_CACHE)
#endif
#ifndef cpu_has_fpu
-#define cpu_has_fpu (cpu_data[0].options & MIPS_CPU_FPU)
+#define cpu_has_fpu (current_cpu_data.options & MIPS_CPU_FPU)
#endif
#ifndef cpu_has_32fpr
#define cpu_has_32fpr (cpu_data[0].options & MIPS_CPU_32FPR)
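
The cpu_has_fpu change above is the per-CPU half of this patch: with MT, the thread contexts sharing a core need not all see an FPU, so the feature test must consult the CPU the caller is running on rather than the boot CPU. The following stand-alone sketch (plain user-space C, not kernel code; names such as demo_cpuinfo, DEMO_CPU_FPU and smp_processor_id_demo are invented for illustration) models the difference between the old cpu_data[0] lookup and the new current_cpu_data lookup:

/*
 * Stand-alone sketch (not kernel code) of why cpu_has_fpu is switched from
 * cpu_data[0] to current_cpu_data: with MT, option bits can differ per CPU,
 * so the boot CPU's flags are not a safe proxy for the CPU we run on.
 * All names below are made up for the demo.
 */
#include <stdio.h>

#define DEMO_CPU_FPU 0x1UL

struct demo_cpuinfo {
	unsigned long options;
};

/* Two "CPUs": CPU 0 has an FPU, CPU 1 (say, a second VPE) does not. */
static struct demo_cpuinfo cpu_data[2] = {
	{ .options = DEMO_CPU_FPU },
	{ .options = 0 },
};

static int smp_processor_id_demo = 1;	/* pretend we run on CPU 1 */

#define current_cpu_data	cpu_data[smp_processor_id_demo]

/* Old style: always consults the boot CPU. */
#define cpu_has_fpu_old		(cpu_data[0].options & DEMO_CPU_FPU)
/* New style: consults the CPU the caller is executing on. */
#define cpu_has_fpu_new		(current_cpu_data.options & DEMO_CPU_FPU)

int main(void)
{
	printf("old check says FPU present: %d\n", cpu_has_fpu_old ? 1 : 0);
	printf("new check says FPU present: %d\n", cpu_has_fpu_new ? 1 : 0);
	return 0;
}

When the two entries differ, the old macro still reports an FPU even though the "current" CPU has none, which is exactly the situation the new definition guards against.
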
diff --git a/include/asm-mips/fpu.h b/include/asm-mips/fpu.h
index 9c828b1f8218..b0f50015e252 100644
--- a/include/asm-mips/fpu.h
+++ b/include/asm-mips/fpu.h
@@ -21,6 +21,10 @@
#include <asm/processor.h>
#include <asm/current.h>
+#ifdef CONFIG_MIPS_MT_FPAFF
+#include <asm/mips_mt.h>
+#endif
+
struct sigcontext;
struct sigcontext32;
diff --git a/include/asm-mips/processor.h b/include/asm-mips/processor.h
index 786651340de1..0fb75f0762e0 100644
--- a/include/asm-mips/processor.h
+++ b/include/asm-mips/processor.h
@@ -134,6 +134,12 @@ struct thread_struct {
/* Saved fpu/fpu emulator stuff. */
union mips_fpu_union fpu;
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* Emulated instruction count */
+ unsigned long emulated_fp;
+ /* Saved per-thread scheduler affinity mask */
+ cpumask_t user_cpus_allowed;
+#endif /* CONFIG_MIPS_MT_FPAFF */
/* Saved state of the DSP ASE, if available. */
struct mips_dsp_state dsp;
@@ -159,6 +165,12 @@ struct thread_struct {
#define MF_N32 MF_32BIT_ADDR
#define MF_N64 0
+#ifdef CONFIG_MIPS_MT_FPAFF
+#define FPAFF_INIT 0, INIT_CPUMASK,
+#else
+#define FPAFF_INIT
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
#define INIT_THREAD { \
/* \
* saved main processor registers \
@@ -174,6 +186,10 @@ struct thread_struct {
*/ \
INIT_FPU, \
/* \
+ * fpu affinity state (null if not FPAFF) \
+ */ \
+ FPAFF_INIT \
+ /* \
* saved dsp/dsp emulator stuff \
*/ \
INIT_DSP, \
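
INIT_THREAD is a positional initializer for struct thread_struct, so the new FPAFF fields need a matching initializer fragment spliced in at the same position, trailing comma included; when CONFIG_MIPS_MT_FPAFF is off, FPAFF_INIT expands to nothing and the layout is unchanged. Below is a stand-alone sketch of that pattern, with all names invented for illustration (this is not the kernel's actual thread_struct):

/*
 * Stand-alone sketch of the conditional-member / conditional-initializer
 * pattern used by FPAFF_INIT: the struct members and the positional
 * initializer fragment are guarded by the same config symbol, so both
 * variants stay in sync.  All names here are illustrative only.
 */
#include <stdio.h>

#define DEMO_CONFIG_FPAFF 1	/* flip to 0 to build the non-FPAFF layout */

struct demo_thread {
	unsigned long reg31;		/* stands in for the saved registers */
#if DEMO_CONFIG_FPAFF
	unsigned long emulated_fp;	/* extra state only in the FPAFF build */
	unsigned long user_cpus_allowed;
#endif
	unsigned long dsp_state;	/* member that follows the optional block */
};

#if DEMO_CONFIG_FPAFF
#define DEMO_FPAFF_INIT 0, 0,		/* note the trailing comma */
#else
#define DEMO_FPAFF_INIT
#endif

/* Positional initializer, mirroring how INIT_THREAD splices FPAFF_INIT in. */
#define DEMO_INIT_THREAD { \
	0,			/* reg31 */ \
	DEMO_FPAFF_INIT		/* optional fpu-affinity fields */ \
	0,			/* dsp_state */ \
}

static struct demo_thread t = DEMO_INIT_THREAD;

int main(void)
{
	printf("sizeof(struct demo_thread) = %zu\n", sizeof(t));
	printf("dsp_state = %lu\n", t.dsp_state);
	return 0;
}

Flipping DEMO_CONFIG_FPAFF between 0 and 1 makes the struct and the initializer grow and shrink together, which is why the trailing comma lives inside the conditional macro rather than in INIT_THREAD itself.
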
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 39026690d9e4..261f71d16a07 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -155,6 +155,37 @@ extern asmlinkage void *resume(void *last, void *next, void *next_ti);
struct task_struct;
+#ifdef CONFIG_MIPS_MT_FPAFF
+
+/*
+ * Handle the scheduler resume end of FPU affinity management. We do this
+ * inline to try to keep the overhead down. If we have been forced to run on
+ * a "CPU" with an FPU because of a previous high level of FP computation,
+ * but did not actually use the FPU during the most recent time-slice (CU1
+ * isn't set), we undo the restriction on cpus_allowed.
+ *
+ * We're not calling set_cpus_allowed() here, because we have no need to
+ * force prompt migration - we're already switching the current CPU to a
+ * different thread.
+ */
+
+#define switch_to(prev,next,last) \
+do { \
+ if (cpu_has_fpu && \
+ (prev->thread.mflags & MF_FPUBOUND) && \
+ (!(KSTK_STATUS(prev) & ST0_CU1))) { \
+ prev->thread.mflags &= ~MF_FPUBOUND; \
+ prev->cpus_allowed = prev->thread.user_cpus_allowed; \
+ } \
+ if (cpu_has_dsp) \
+ __save_dsp(prev); \
+ next->thread.emulated_fp = 0; \
+ (last) = resume(prev, next, next->thread_info); \
+ if (cpu_has_dsp) \
+ __restore_dsp(current); \
+} while(0)
+
+#else
#define switch_to(prev,next,last) \
do { \
if (cpu_has_dsp) \
@@ -163,6 +194,7 @@ do { \
if (cpu_has_dsp) \
__restore_dsp(current); \
} while(0)
+#endif
/*
* On SMP systems, when the scheduler does migration-cost autodetection,
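
Taken together, the system.h hunk implements a simple policy at context-switch time: a task that was forced onto FPU-capable CPUs (MF_FPUBOUND) but whose saved Status register shows CU1 clear, i.e. it did no FP work during its last timeslice, gets its original affinity mask back, and emulated_fp is reset for the incoming task. The following stand-alone model covers just that decision, with invented types and a made-up MF_FPUBOUND value (only ST0_CU1 is the real architectural bit); no scheduler is involved:

/*
 * Stand-alone model (not kernel code) of the cpus_allowed relaxation done in
 * the FPAFF variant of switch_to(): a task that was bound to FPU-capable
 * CPUs, but did not set CU1 during its last timeslice, gets its original
 * affinity mask back.  Types and helpers below are invented for the sketch.
 */
#include <stdio.h>

#define MF_FPUBOUND	0x10UL		/* "bound to FPU CPUs" flag (value invented for this sketch) */
#define ST0_CU1		0x20000000UL	/* coprocessor 1 usable bit in c0_status */

struct demo_task {
	unsigned long mflags;		 /* per-thread MIPS flags */
	unsigned long cp0_status;	 /* saved Status register */
	unsigned long cpus_allowed;	 /* current affinity mask (as a bitmask) */
	unsigned long user_cpus_allowed; /* mask the user originally asked for */
};

/* Mirrors the test at the top of the FPAFF switch_to(). */
static void relax_fpu_affinity(struct demo_task *prev)
{
	if ((prev->mflags & MF_FPUBOUND) && !(prev->cp0_status & ST0_CU1)) {
		prev->mflags &= ~MF_FPUBOUND;
		prev->cpus_allowed = prev->user_cpus_allowed;
	}
}

int main(void)
{
	struct demo_task t = {
		.mflags            = MF_FPUBOUND,
		.cp0_status        = 0,		/* CU1 clear: no FP use last slice */
		.cpus_allowed      = 0x1,	/* currently pinned to CPU 0 (the FPU CPU) */
		.user_cpus_allowed = 0xf,	/* user originally allowed CPUs 0-3 */
	};

	relax_fpu_affinity(&t);
	printf("cpus_allowed after switch: 0x%lx\n", t.cpus_allowed);	/* prints 0xf */
	return 0;
}

Because the mask is restored directly rather than through set_cpus_allowed(), the relaxation only takes effect the next time the scheduler places the task, which matches the comment in the patch: prompt migration is unnecessary since the CPU is switching to a different thread anyway.
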