Diffstat (limited to 'include/asm-generic')
 include/asm-generic/bug.h         |  2 +-
 include/asm-generic/mutex-dec.h   | 26 ++------------------------
 include/asm-generic/mutex-xchg.h  |  9 +--------
 include/asm-generic/rtc.h         | 24 ++++++++++++------------
 include/asm-generic/vmlinux.lds.h | 14 +++++++++++++-
 5 files changed, 29 insertions(+), 46 deletions(-)
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 0f6dabd4b517..12c07c1866b2 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -41,7 +41,7 @@ extern void warn_slowpath(const char *file, const int line,
#define __WARN() warn_on_slowpath(__FILE__, __LINE__)
#define __WARN_printf(arg...) warn_slowpath(__FILE__, __LINE__, arg)
#else
-#define __WARN_printf(arg...) __WARN()
+#define __WARN_printf(arg...) do { printk(arg); __WARN(); } while (0)
#endif
#ifndef WARN_ON
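
Note on the bug.h hunk: previously, when the warn_slowpath() helper was not available, __WARN_printf() expanded to a bare __WARN() and the caller's printf-style arguments were silently discarded; the new definition prints the message first and then fires the warning. A minimal userspace sketch of the behavioural difference (warn_stub() is a hypothetical stand-in for __WARN()):

    #include <stdio.h>

    static void warn_stub(void)
    {
        fprintf(stderr, "WARNING: at %s:%d\n", __FILE__, __LINE__);
    }

    /* before: printf-style arguments silently dropped */
    #define WARN_PRINTF_OLD(arg...) warn_stub()

    /* after: message printed, then the warning fires */
    #define WARN_PRINTF_NEW(arg...) do { fprintf(stderr, arg); warn_stub(); } while (0)

    int main(void)
    {
        WARN_PRINTF_OLD("old: bad state %d\n", 42);  /* message lost */
        WARN_PRINTF_NEW("new: bad state %d\n", 42);  /* message kept */
        return 0;
    }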
diff --git a/include/asm-generic/mutex-dec.h b/include/asm-generic/mutex-dec.h
index ed108be6743f..f104af7cf437 100644
--- a/include/asm-generic/mutex-dec.h
+++ b/include/asm-generic/mutex-dec.h
@@ -22,8 +22,6 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
fail_fn(count);
- else
- smp_mb();
}
/**
@@ -41,10 +39,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_dec_return(count) < 0))
return fail_fn(count);
- else {
- smp_mb();
- return 0;
- }
+ return 0;
}
/**
@@ -63,7 +58,6 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
- smp_mb();
if (unlikely(atomic_inc_return(count) <= 0))
fail_fn(count);
}
@@ -88,25 +82,9 @@ __mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
- /*
- * We have two variants here. The cmpxchg based one is the best one
- * because it never induce a false contention state. It is included
- * here because architectures using the inc/dec algorithms over the
- * xchg ones are much more likely to support cmpxchg natively.
- *
- * If not we fall back to the spinlock based variant - that is
- * just as efficient (and simpler) as a 'destructive' probing of
- * the mutex state would be.
- */
-#ifdef __HAVE_ARCH_CMPXCHG
- if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
- smp_mb();
+ if (likely(atomic_cmpxchg(count, 1, 0) == 1))
return 1;
- }
return 0;
-#else
- return fail_fn(count);
-#endif
}
#endif
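
Note on the mutex-dec.h hunks: the explicit smp_mb() calls are dropped because atomic_dec_return(), atomic_inc_return() and atomic_cmpxchg() are value-returning atomic RMW operations, which the kernel requires to imply a full memory barrier on every architecture, so the extra barriers only added overhead. The __HAVE_ARCH_CMPXCHG fallback in the trylock path goes away for the reason the deleted comment itself gives: atomic_cmpxchg() can now be relied on directly. A compilable C11 userspace sketch of the resulting trylock shape (names and the seq_cst mapping are illustrative, not the kernel API):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int count = 1;  /* 1 = unlocked, 0 = locked, <0 = contended */

    static bool trylock_sketch(void)
    {
        int expected = 1;
        /* a seq_cst compare-exchange is a full-barrier RMW, so no
         * separate fence is needed on either the success or failure path */
        return atomic_compare_exchange_strong(&count, &expected, 0);
    }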
diff --git a/include/asm-generic/mutex-xchg.h b/include/asm-generic/mutex-xchg.h
index 7b9cd2cbfebe..580a6d35c700 100644
--- a/include/asm-generic/mutex-xchg.h
+++ b/include/asm-generic/mutex-xchg.h
@@ -27,8 +27,6 @@ __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 0) != 1))
fail_fn(count);
- else
- smp_mb();
}
/**
@@ -46,10 +44,7 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
if (unlikely(atomic_xchg(count, 0) != 1))
return fail_fn(count);
- else {
- smp_mb();
- return 0;
- }
+ return 0;
}
/**
@@ -67,7 +62,6 @@ __mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
- smp_mb();
if (unlikely(atomic_xchg(count, 1) != 0))
fail_fn(count);
}
@@ -110,7 +104,6 @@ __mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
if (prev < 0)
prev = 0;
}
- smp_mb();
return prev;
}
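
Note on the mutex-xchg.h hunks: the barrier removals follow the same rule, since atomic_xchg() is also a full-barrier RMW operation. The trylock hunk above shows only the tail of the function; for orientation, the xchg-based trylock does a destructive probe of the lock word and then repairs the contended state, roughly like this C11 rendering (illustrative, not the kernel source):

    #include <stdatomic.h>

    static atomic_int count = 1;  /* 1 = unlocked, 0 = locked, <0 = contended */

    static int xchg_trylock_sketch(void)
    {
        int prev = atomic_exchange(&count, 0);  /* destructive probe */

        if (prev < 0) {
            /* we clobbered the "contended" marker; put it back. If 1
             * came back meanwhile, we own the lock after all. */
            prev = atomic_exchange(&count, prev);
            if (prev < 0)
                prev = 0;
        }
        return prev;  /* 1 if acquired, 0 otherwise */
    }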
diff --git a/include/asm-generic/rtc.h b/include/asm-generic/rtc.h
index 71ef3f0b9685..89061c1a67d4 100644
--- a/include/asm-generic/rtc.h
+++ b/include/asm-generic/rtc.h
@@ -84,12 +84,12 @@ static inline unsigned int get_rtc_time(struct rtc_time *time)
if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
{
- BCD_TO_BIN(time->tm_sec);
- BCD_TO_BIN(time->tm_min);
- BCD_TO_BIN(time->tm_hour);
- BCD_TO_BIN(time->tm_mday);
- BCD_TO_BIN(time->tm_mon);
- BCD_TO_BIN(time->tm_year);
+ time->tm_sec = bcd2bin(time->tm_sec);
+ time->tm_min = bcd2bin(time->tm_min);
+ time->tm_hour = bcd2bin(time->tm_hour);
+ time->tm_mday = bcd2bin(time->tm_mday);
+ time->tm_mon = bcd2bin(time->tm_mon);
+ time->tm_year = bcd2bin(time->tm_year);
}
#ifdef CONFIG_MACH_DECSTATION
@@ -159,12 +159,12 @@ static inline int set_rtc_time(struct rtc_time *time)
if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY)
|| RTC_ALWAYS_BCD) {
- BIN_TO_BCD(sec);
- BIN_TO_BCD(min);
- BIN_TO_BCD(hrs);
- BIN_TO_BCD(day);
- BIN_TO_BCD(mon);
- BIN_TO_BCD(yrs);
+ sec = bin2bcd(sec);
+ min = bin2bcd(min);
+ hrs = bin2bcd(hrs);
+ day = bin2bcd(day);
+ mon = bin2bcd(mon);
+ yrs = bin2bcd(yrs);
}
save_control = CMOS_READ(RTC_CONTROL);
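
Note on the rtc.h hunks: the BCD_TO_BIN()/BIN_TO_BCD() macros mutated their argument in place; they are replaced by the bcd2bin()/bin2bcd() helpers, which take and return values, giving type checking and no hidden side effects. The helpers reduce to the usual BCD arithmetic, roughly (per lib/bcd.c of this era):

    unsigned bcd2bin(unsigned char val)
    {
        return (val & 0x0f) + (val >> 4) * 10;  /* e.g. 0x42 -> 42 */
    }

    unsigned char bin2bcd(unsigned val)
    {
        return ((val / 10) << 4) + val % 10;    /* e.g. 59 -> 0x59 */
    }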
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 74c5faf26c05..80744606bad1 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -37,6 +37,13 @@
#define MEM_DISCARD(sec) *(.mem##sec)
#endif
+#ifdef CONFIG_FTRACE_MCOUNT_RECORD
+#define MCOUNT_REC() VMLINUX_SYMBOL(__start_mcount_loc) = .; \
+ *(__mcount_loc) \
+ VMLINUX_SYMBOL(__stop_mcount_loc) = .;
+#else
+#define MCOUNT_REC()
+#endif
/* .data section */
#define DATA_DATA \
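
Note on the MCOUNT_REC() addition: with CONFIG_FTRACE_MCOUNT_RECORD, the build records the address of every mcount call site in the __mcount_loc section, and the macro brackets that table with __start_mcount_loc/__stop_mcount_loc so ftrace can walk it at boot. A hedged sketch of how such a section-bounded table is consumed (handle_call_site() is a hypothetical placeholder, not a kernel function):

    extern unsigned long __start_mcount_loc[];
    extern unsigned long __stop_mcount_loc[];

    static void walk_mcount_table(void)
    {
        unsigned long *p;

        for (p = __start_mcount_loc; p < __stop_mcount_loc; p++)
            handle_call_site(*p);  /* *p: address of one mcount call site */
    }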
@@ -52,7 +59,10 @@
. = ALIGN(8); \
VMLINUX_SYMBOL(__start___markers) = .; \
*(__markers) \
- VMLINUX_SYMBOL(__stop___markers) = .;
+ VMLINUX_SYMBOL(__stop___markers) = .; \
+ VMLINUX_SYMBOL(__start___tracepoints) = .; \
+ *(__tracepoints) \
+ VMLINUX_SYMBOL(__stop___tracepoints) = .;
#define RO_DATA(align) \
. = ALIGN((align)); \
@@ -61,6 +71,7 @@
*(.rodata) *(.rodata.*) \
*(__vermagic) /* Kernel version magic */ \
*(__markers_strings) /* Markers: strings */ \
+ *(__tracepoints_strings)/* Tracepoints: strings */ \
} \
\
.rodata1 : AT(ADDR(.rodata1) - LOAD_OFFSET) { \
@@ -188,6 +199,7 @@
/* __*init sections */ \
__init_rodata : AT(ADDR(__init_rodata) - LOAD_OFFSET) { \
*(.ref.rodata) \
+ MCOUNT_REC() \
DEV_KEEP(init.rodata) \
DEV_KEEP(exit.rodata) \
CPU_KEEP(init.rodata) \
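
Note on the tracepoint additions: the new __tracepoints section collects one struct tracepoint per declared tracepoint between __start___tracepoints and __stop___tracepoints, with the name strings kept separately in the read-only __tracepoints_strings section. Iteration then works the same way as for the existing markers table; a sketch with a simplified, possibly inexact struct layout:

    struct tracepoint {
        const char *name;  /* points into __tracepoints_strings */
        int state;         /* non-zero when enabled */
        void **funcs;      /* registered probe callbacks */
    };

    extern struct tracepoint __start___tracepoints[];
    extern struct tracepoint __stop___tracepoints[];

    static void for_each_tracepoint_sketch(void (*fct)(struct tracepoint *))
    {
        struct tracepoint *iter;

        for (iter = __start___tracepoints; iter < __stop___tracepoints; iter++)
            fct(iter);
    }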