author    Russell King <rmk@dyn-67.arm.linux.org.uk>        2006-12-08 15:22:20 +0000
committer Russell King <rmk+kernel@arm.linux.org.uk>        2006-12-08 16:05:26 +0000
commit    efe90d273b6f365d37c0f82fbbd68a40982c3265 (patch)
tree      a065719ebabfd6fa8b41e5a01fc5ff0c7ad3e80c
parent    Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ieee1394/linux1394-2.6 (diff)
[ARM] Handle HWCAP_VFP in VFP support code
Don't set HWCAP_VFP in the processor support file; not only does it
depend on the processor features, but it also depends on the support
code being present.  Therefore, only set it if the support code
detects that we have a VFP coprocessor attached.  Also, move the VFP
handling of the coprocessor access register into the VFP support code.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
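[Editor's note] With this change, HWCAP_VFP only appears in the ELF hwcaps when
vfp_init() has actually found a VFP coprocessor, so userspace should test the
auxiliary vector rather than infer VFP from the CPU type. Below is a minimal
sketch of such a check; it assumes the later glibc getauxval() helper and the
HWCAP_VFP bit from the kernel's ARM hwcap header, neither of which is part of
this commit.

/*
 * Hypothetical userspace check, not part of this patch: read AT_HWCAP
 * from the ELF auxiliary vector and test the HWCAP_VFP bit that the
 * kernel now sets only when a VFP coprocessor was detected.
 */
#include <stdio.h>
#include <sys/auxv.h>   /* getauxval(), AT_HWCAP (glibc >= 2.16) */
#include <asm/hwcap.h>  /* HWCAP_VFP on ARM */

int main(void)
{
	unsigned long hwcaps = getauxval(AT_HWCAP);

	if (hwcaps & HWCAP_VFP)
		printf("kernel reports VFP support\n");
	else
		printf("no VFP reported: fall back to soft-float paths\n");

	return 0;
}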
-rw-r--r--  arch/arm/kernel/setup.c    |  3
-rw-r--r--  arch/arm/mm/proc-arm926.S  |  2
-rw-r--r--  arch/arm/mm/proc-v6.S      |  7
-rw-r--r--  arch/arm/vfp/vfpmodule.c   | 26
-rw-r--r--  include/asm-arm/system.h   | 43
5 files changed, 57 insertions(+), 24 deletions(-)
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index 238dd9b6db84..cf2bd4242803 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -354,9 +354,6 @@ static void __init setup_processor(void)
#ifndef CONFIG_ARM_THUMB
elf_hwcap &= ~HWCAP_THUMB;
#endif
-#ifndef CONFIG_VFP
- elf_hwcap &= ~HWCAP_VFP;
-#endif
cpu_proc_init();
}
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S
index 8628ed29a955..080efac9d9ff 100644
--- a/arch/arm/mm/proc-arm926.S
+++ b/arch/arm/mm/proc-arm926.S
@@ -480,7 +480,7 @@ __arm926_proc_info:
b __arm926_setup
.long cpu_arch_name
.long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA
+ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
.long cpu_arm926_name
.long arm926_processor_functions
.long v4wbi_tlb_fns
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index b440c8a1d345..c40baf8a47f0 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -207,11 +207,6 @@ __v6_setup:
#endif
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
#endif /* CONFIG_MMU */
-#ifdef CONFIG_VFP
- mrc p15, 0, r0, c1, c0, 2
- orr r0, r0, #(0xf << 20)
- mcr p15, 0, r0, c1, c0, 2 @ Enable full access to VFP
-#endif
adr r5, v6_crval
ldmia r5, {r5, r6}
mrc p15, 0, r0, c1, c0, 0 @ read control register
@@ -273,7 +268,7 @@ __v6_proc_info:
b __v6_setup
.long cpu_arch_name
.long cpu_elf_name
- .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_VFP|HWCAP_EDSP|HWCAP_JAVA
+ .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
.long cpu_v6_name
.long v6_processor_functions
.long v6wbi_tlb_fns
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index f08eafbddcc1..e26cc1f59948 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -263,13 +263,24 @@ void VFP9_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
if (exceptions)
vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
}
-
+
/*
* VFP support code initialisation.
*/
static int __init vfp_init(void)
{
unsigned int vfpsid;
+ unsigned int cpu_arch = cpu_architecture();
+ u32 access = 0;
+
+ if (cpu_arch >= CPU_ARCH_ARMv6) {
+ access = get_copro_access();
+
+ /*
+ * Enable full access to VFP (cp10 and cp11)
+ */
+ set_copro_access(access | CPACC_FULL(10) | CPACC_FULL(11));
+ }
/*
* First check that there is a VFP that we can use.
@@ -281,6 +292,12 @@ static int __init vfp_init(void)
printk(KERN_INFO "VFP support v0.3: ");
if (VFP_arch) {
printk("not present\n");
+
+ /*
+ * Restore the copro access register.
+ */
+ if (cpu_arch >= CPU_ARCH_ARMv6)
+ set_copro_access(access);
} else if (vfpsid & FPSID_NODOUBLE) {
printk("no double precision support\n");
} else {
@@ -291,9 +308,16 @@ static int __init vfp_init(void)
(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);
+
vfp_vector = vfp_support_entry;
thread_register_notifier(&vfp_notifier_block);
+
+ /*
+ * We detected VFP, and the support code is
+ * in place; report VFP support to userspace.
+ */
+ elf_hwcap |= HWCAP_VFP;
}
return 0;
}
diff --git a/include/asm-arm/system.h b/include/asm-arm/system.h
index f05fbe31576c..f60faccf01fa 100644
--- a/include/asm-arm/system.h
+++ b/include/asm-arm/system.h
@@ -139,19 +139,36 @@ static inline int cpu_is_xsc3(void)
#define cpu_is_xscale() 1
#endif
-#define set_cr(x) \
- __asm__ __volatile__( \
- "mcr p15, 0, %0, c1, c0, 0 @ set CR" \
- : : "r" (x) : "cc")
-
-#define get_cr() \
- ({ \
- unsigned int __val; \
- __asm__ __volatile__( \
- "mrc p15, 0, %0, c1, c0, 0 @ get CR" \
- : "=r" (__val) : : "cc"); \
- __val; \
- })
+static inline unsigned int get_cr(void)
+{
+ unsigned int val;
+ asm("mrc p15, 0, %0, c1, c0, 0 @ get CR" : "=r" (val) : : "cc");
+ return val;
+}
+
+static inline void set_cr(unsigned int val)
+{
+ asm volatile("mcr p15, 0, %0, c1, c0, 0 @ set CR"
+ : : "r" (val) : "cc");
+}
+
+#define CPACC_FULL(n) (3 << (n * 2))
+#define CPACC_SVC(n) (1 << (n * 2))
+#define CPACC_DISABLE(n) (0 << (n * 2))
+
+static inline unsigned int get_copro_access(void)
+{
+ unsigned int val;
+ asm("mrc p15, 0, %0, c1, c0, 2 @ get copro access"
+ : "=r" (val) : : "cc");
+ return val;
+}
+
+static inline void set_copro_access(unsigned int val)
+{
+ asm volatile("mcr p15, 0, %0, c1, c0, 2 @ set copro access"
+ : : "r" (val) : "cc");
+}
extern unsigned long cr_no_alignment; /* defined in entry-armv.S */
extern unsigned long cr_alignment; /* defined in entry-armv.S */
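[Editor's note] As a quick cross-check (my own sketch, not part of the patch):
the CPACC_FULL() bits that vfp_init() now sets for cp10 and cp11 reproduce
exactly the 0xf << 20 mask that the removed proc-v6.S assembly OR'd into the
coprocessor access register.

/* Standalone sanity check, assuming only the CPACC_FULL() definition above. */
#include <assert.h>

#define CPACC_FULL(n)	(3 << (n * 2))

int main(void)
{
	/* cp10 and cp11 full access == bits [23:20] == 0xf << 20 */
	assert((CPACC_FULL(10) | CPACC_FULL(11)) == (0xf << 20));
	return 0;
}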