Diffstat (limited to 'arch/tile')
-rw-r--r--  arch/tile/Kconfig                      |   3
-rw-r--r--  arch/tile/gxio/mpipe.c                 |  37
-rw-r--r--  arch/tile/include/asm/Kbuild           |   1
-rw-r--r--  arch/tile/include/asm/irqflags.h       |   4
-rw-r--r--  arch/tile/include/asm/mmu_context.h    |   6
-rw-r--r--  arch/tile/include/asm/sections.h       |   3
-rw-r--r--  arch/tile/include/asm/vdso.h           |  20
-rw-r--r--  arch/tile/include/uapi/arch/sim_def.h  |  10
-rw-r--r--  arch/tile/kernel/irq.c                 |  14
-rw-r--r--  arch/tile/kernel/messaging.c           |   4
-rw-r--r--  arch/tile/kernel/perf_event.c          |  12
-rw-r--r--  arch/tile/kernel/process.c             |   2
-rw-r--r--  arch/tile/kernel/setup.c               |   3
-rw-r--r--  arch/tile/kernel/single_step.c         |   4
-rw-r--r--  arch/tile/kernel/smp.c                 |   3
-rw-r--r--  arch/tile/kernel/smpboot.c             |   6
-rw-r--r--  arch/tile/kernel/time.c                |  69
-rw-r--r--  arch/tile/kernel/traps.c               |   2
-rw-r--r--  arch/tile/kernel/vdso/vdso.lds.S       |   2
-rw-r--r--  arch/tile/kernel/vdso/vgettimeofday.c  | 176
-rw-r--r--  arch/tile/kernel/vmlinux.lds.S         |   2
-rw-r--r--  arch/tile/mm/highmem.c                 |   2
-rw-r--r--  arch/tile/mm/init.c                    |  16
23 files changed, 271 insertions(+), 130 deletions(-)
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index a3ffe2dd4832..7cca41842a9e 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -134,6 +134,7 @@ config TILEGX
select HAVE_KPROBES
select HAVE_KRETPROBES
select HAVE_ARCH_KGDB
+ select ARCH_SUPPORTS_ATOMIC_RMW
config TILEPRO
def_bool !TILEGX
@@ -191,8 +192,6 @@ source "kernel/Kconfig.hz"
config KEXEC
bool "kexec system call"
- select CRYPTO
- select CRYPTO_SHA256
---help---
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
diff --git a/arch/tile/gxio/mpipe.c b/arch/tile/gxio/mpipe.c
index 5301a9ffbae1..320ff5e6e61e 100644
--- a/arch/tile/gxio/mpipe.c
+++ b/arch/tile/gxio/mpipe.c
@@ -29,6 +29,32 @@
/* HACK: Avoid pointless "shadow" warnings. */
#define link link_shadow
+/**
+ * strscpy - Copy a C-string into a sized buffer, but only if it fits
+ * @dest: Where to copy the string to
+ * @src: Where to copy the string from
+ * @size: size of destination buffer
+ *
+ * Use this routine to avoid copying too-long strings.
+ * The routine returns the total number of bytes copied
+ * (including the trailing NUL) or zero if the buffer wasn't
+ * big enough. To ensure that programmers pay attention
+ * to the return code, the destination has a single NUL
+ * written at the front (if size is non-zero) when the
+ * buffer is not big enough.
+ */
+static size_t strscpy(char *dest, const char *src, size_t size)
+{
+ size_t len = strnlen(src, size) + 1;
+ if (len > size) {
+ if (size)
+ dest[0] = '\0';
+ return 0;
+ }
+ memcpy(dest, src, len);
+ return len;
+}
+
int gxio_mpipe_init(gxio_mpipe_context_t *context, unsigned int mpipe_index)
{
char file[32];
@@ -511,8 +537,8 @@ int gxio_mpipe_link_instance(const char *link_name)
if (!context)
return GXIO_ERR_NO_DEVICE;
- strncpy(name.name, link_name, sizeof(name.name));
- name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
+ if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+ return GXIO_ERR_NO_DEVICE;
return gxio_mpipe_info_instance_aux(context, name);
}
@@ -529,7 +555,8 @@ int gxio_mpipe_link_enumerate_mac(int idx, char *link_name, uint8_t *link_mac)
rv = gxio_mpipe_info_enumerate_aux(context, idx, &name, &mac);
if (rv >= 0) {
- strncpy(link_name, name.name, sizeof(name.name));
+ if (strscpy(link_name, name.name, sizeof(name.name)) == 0)
+ return GXIO_ERR_INVAL_MEMORY_SIZE;
memcpy(link_mac, mac.mac, sizeof(mac.mac));
}
@@ -545,8 +572,8 @@ int gxio_mpipe_link_open(gxio_mpipe_link_t *link,
_gxio_mpipe_link_name_t name;
int rv;
- strncpy(name.name, link_name, sizeof(name.name));
- name.name[GXIO_MPIPE_LINK_NAME_LEN - 1] = '\0';
+ if (strscpy(name.name, link_name, sizeof(name.name)) == 0)
+ return GXIO_ERR_NO_DEVICE;
rv = gxio_mpipe_link_open_aux(context, name, flags);
if (rv < 0)
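
The local strscpy() above turns truncation into an error the caller can test, instead of the silently clipped or unterminated name that the old strncpy() pattern risked. A minimal userspace sketch of the calling convention — the buffer size and link name are invented for illustration, and the error return stands in for GXIO_ERR_NO_DEVICE:

	#include <stdio.h>
	#include <string.h>

	/* local copy of the helper added in mpipe.c above */
	static size_t strscpy(char *dest, const char *src, size_t size)
	{
		size_t len = strnlen(src, size) + 1;
		if (len > size) {
			if (size)
				dest[0] = '\0';
			return 0;
		}
		memcpy(dest, src, len);
		return len;
	}

	int main(void)
	{
		char name[8];		/* stand-in for name.name */

		if (strscpy(name, "gbe0", sizeof(name)) == 0)
			return 1;	/* truncated: refuse the link name */
		printf("link: %s\n", name);
		return 0;
	}

Note that this helper predates, and behaves differently from, the strscpy() later added to the core kernel, which copies a truncated result and returns -E2BIG rather than 0.
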
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 0aa5675e7025..e6462b8a6284 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -17,6 +17,7 @@ generic-y += ioctl.h
generic-y += ioctls.h
generic-y += ipcbuf.h
generic-y += irq_regs.h
+generic-y += irq_work.h
generic-y += local.h
generic-y += local64.h
generic-y += mcs_spinlock.h
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 71af5747874d..60d62a292fce 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -140,12 +140,12 @@ extern unsigned int debug_smp_processor_id(void);
/*
* Read the set of maskable interrupts.
- * We avoid the preemption warning here via __this_cpu_ptr since even
+ * We avoid the preemption warning here via raw_cpu_ptr since even
* if irqs are already enabled, it's harmless to read the wrong cpu's
* enabled mask.
*/
#define arch_local_irqs_enabled() \
- (*__this_cpu_ptr(&interrupts_enabled_mask))
+ (*raw_cpu_ptr(&interrupts_enabled_mask))
/* Re-enable all maskable interrupts. */
#define arch_local_irq_enable() \
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 4734215e2ad4..f67753db1f78 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -84,7 +84,7 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *t)
* clear any pending DMA interrupts.
*/
if (current->thread.tile_dma_state.enabled)
- install_page_table(mm->pgd, __get_cpu_var(current_asid));
+ install_page_table(mm->pgd, __this_cpu_read(current_asid));
#endif
}
@@ -96,12 +96,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
int cpu = smp_processor_id();
/* Pick new ASID. */
- int asid = __get_cpu_var(current_asid) + 1;
+ int asid = __this_cpu_read(current_asid) + 1;
if (asid > max_asid) {
asid = min_asid;
local_flush_tlb();
}
- __get_cpu_var(current_asid) = asid;
+ __this_cpu_write(current_asid, asid);
/* Clear cpu from the old mm, and set it in the new one. */
cpumask_clear_cpu(cpu, mm_cpumask(prev));
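
Most of this series is one mechanical conversion: the deprecated __get_cpu_var() lvalue accessor is replaced by the this_cpu family, which states the access width and direction explicitly and lets each architecture emit a single preemption-safe instruction where it has one. A rough translation table, sketched in C (the counter variable is illustrative, not from this diff):

	DEFINE_PER_CPU(int, counter);

	x = __get_cpu_var(counter);       /* -> x = __this_cpu_read(counter);  */
	__get_cpu_var(counter) = x;       /* -> __this_cpu_write(counter, x);  */
	__get_cpu_var(counter)++;         /* -> __this_cpu_inc(counter);       */
	p = &__get_cpu_var(counter);      /* -> p = this_cpu_ptr(&counter);    */
	p = &__raw_get_cpu_var(counter);  /* -> p = raw_cpu_ptr(&counter);     */

The raw_ forms skip the debug preemption check, which is why irqflags.h above switches to raw_cpu_ptr: as its comment says, reading a stale cpu's enabled mask there is harmless.
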
diff --git a/arch/tile/include/asm/sections.h b/arch/tile/include/asm/sections.h
index 5d5d3b739a6b..86a746243dc8 100644
--- a/arch/tile/include/asm/sections.h
+++ b/arch/tile/include/asm/sections.h
@@ -19,9 +19,6 @@
#include <asm-generic/sections.h>
-/* Text and data are at different areas in the kernel VA space. */
-extern char _sinitdata[], _einitdata[];
-
/* Write-once data is writable only till the end of initialization. */
extern char __w1data_begin[], __w1data_end[];
diff --git a/arch/tile/include/asm/vdso.h b/arch/tile/include/asm/vdso.h
index 9f6a78d665fa..9b069692153f 100644
--- a/arch/tile/include/asm/vdso.h
+++ b/arch/tile/include/asm/vdso.h
@@ -15,6 +15,7 @@
#ifndef __TILE_VDSO_H__
#define __TILE_VDSO_H__
+#include <linux/seqlock.h>
#include <linux/types.h>
/*
@@ -26,15 +27,20 @@
*/
struct vdso_data {
- __u64 tz_update_count; /* Timezone atomicity ctr */
- __u64 tb_update_count; /* Timebase atomicity ctr */
- __u64 xtime_tod_stamp; /* TOD clock for xtime */
- __u64 xtime_clock_sec; /* Kernel time second */
- __u64 xtime_clock_nsec; /* Kernel time nanosecond */
- __u64 wtom_clock_sec; /* Wall to monotonic clock second */
- __u64 wtom_clock_nsec; /* Wall to monotonic clock nanosecond */
+ seqcount_t tz_seq; /* Timezone seqlock */
+ seqcount_t tb_seq; /* Timebase seqlock */
+ __u64 cycle_last; /* TOD clock for xtime */
+ __u64 mask; /* Cycle mask */
__u32 mult; /* Cycle to nanosecond multiplier */
__u32 shift; /* Cycle to nanosecond divisor (power of two) */
+ __u64 wall_time_sec;
+ __u64 wall_time_snsec;
+ __u64 monotonic_time_sec;
+ __u64 monotonic_time_snsec;
+ __u64 wall_time_coarse_sec;
+ __u64 wall_time_coarse_nsec;
+ __u64 monotonic_time_coarse_sec;
+ __u64 monotonic_time_coarse_nsec;
__u32 tz_minuteswest; /* Minutes west of Greenwich */
__u32 tz_dsttime; /* Type of dst correction */
};
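
Two things change in this structure: the hand-rolled odd/even update counters become generic seqcount_t locks, and the layout adopts the split (modeled on the x86 vDSO data page) into whole seconds plus "snsec" fields, where snsec holds nanoseconds pre-shifted left by shift. That lets the reader fold in raw cycle deltas with no extra scaling; roughly, as sketched here and implemented in vgettimeofday.c later in this diff:

	/* all quantities below are in "shifted nanosecond" fixed point */
	cycles = (get_cycles() - cycle_last) & mask;
	ns = (wall_time_snsec + cycles * mult) >> shift;
	/* result: wall_time_sec seconds + ns nanoseconds, where ns may
	   still exceed NSEC_PER_SEC and is normalized by the caller */
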
diff --git a/arch/tile/include/uapi/arch/sim_def.h b/arch/tile/include/uapi/arch/sim_def.h
index 4b44a2b6a09a..1c069537ae41 100644
--- a/arch/tile/include/uapi/arch/sim_def.h
+++ b/arch/tile/include/uapi/arch/sim_def.h
@@ -360,19 +360,19 @@
* @{
*/
-/** Use with with SIM_PROFILER_CHIP_xxx to control the memory controllers. */
+/** Use with SIM_PROFILER_CHIP_xxx to control the memory controllers. */
#define SIM_CHIP_MEMCTL 0x001
-/** Use with with SIM_PROFILER_CHIP_xxx to control the XAUI interface. */
+/** Use with SIM_PROFILER_CHIP_xxx to control the XAUI interface. */
#define SIM_CHIP_XAUI 0x002
-/** Use with with SIM_PROFILER_CHIP_xxx to control the PCIe interface. */
+/** Use with SIM_PROFILER_CHIP_xxx to control the PCIe interface. */
#define SIM_CHIP_PCIE 0x004
-/** Use with with SIM_PROFILER_CHIP_xxx to control the MPIPE interface. */
+/** Use with SIM_PROFILER_CHIP_xxx to control the MPIPE interface. */
#define SIM_CHIP_MPIPE 0x008
-/** Use with with SIM_PROFILER_CHIP_xxx to control the TRIO interface. */
+/** Use with SIM_PROFILER_CHIP_xxx to control the TRIO interface. */
#define SIM_CHIP_TRIO 0x010
/** Reference all chip devices. */
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 637f2ffaa5f5..ba85765e1436 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -73,7 +73,7 @@ static DEFINE_PER_CPU(int, irq_depth);
*/
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
- int depth = __get_cpu_var(irq_depth)++;
+ int depth = __this_cpu_inc_return(irq_depth);
unsigned long original_irqs;
unsigned long remaining_irqs;
struct pt_regs *old_regs;
@@ -120,7 +120,7 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
/* Count device irqs; Linux IPIs are counted elsewhere. */
if (irq != IRQ_RESCHEDULE)
- __get_cpu_var(irq_stat).irq_dev_intr_count++;
+ __this_cpu_inc(irq_stat.irq_dev_intr_count);
generic_handle_irq(irq);
}
@@ -130,10 +130,10 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
* including any that were reenabled during interrupt
* handling.
*/
- if (depth == 0)
- unmask_irqs(~__get_cpu_var(irq_disable_mask));
+ if (depth == 1)
+ unmask_irqs(~__this_cpu_read(irq_disable_mask));
- __get_cpu_var(irq_depth)--;
+ __this_cpu_dec(irq_depth);
/*
* Track time spent against the current process again and
@@ -151,7 +151,7 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
static void tile_irq_chip_enable(struct irq_data *d)
{
get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
- if (__get_cpu_var(irq_depth) == 0)
+ if (__this_cpu_read(irq_depth) == 0)
unmask_irqs(1UL << d->irq);
put_cpu_var(irq_disable_mask);
}
@@ -197,7 +197,7 @@ static void tile_irq_chip_ack(struct irq_data *d)
*/
static void tile_irq_chip_eoi(struct irq_data *d)
{
- if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq)))
+ if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq)))
unmask_irqs(1UL << d->irq);
}
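
One conversion in this file is not purely mechanical: __get_cpu_var(irq_depth)++ was a post-increment yielding the previous value, while __this_cpu_inc_return() yields the incremented value, which is why the matching test changed from depth == 0 to depth == 1. In plain C terms:

	int d = 0;
	int old = d++;	/* post-increment: old == 0, d == 1 */

	d = 0;
	int new = ++d;	/* inc_return semantics: new == 1, d == 1 */

The outermost interrupt nesting level that used to read back as 0 therefore reads back as 1 after the conversion.
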
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 7867266f9716..ac950be1318e 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(HV_MsgState, msg_state);
void init_messaging(void)
{
/* Allocate storage for messages in kernel space */
- HV_MsgState *state = &__get_cpu_var(msg_state);
+ HV_MsgState *state = this_cpu_ptr(&msg_state);
int rc = hv_register_message_state(state);
if (rc != HV_OK)
panic("hv_register_message_state: error %d", rc);
@@ -96,7 +96,7 @@ void hv_message_intr(struct pt_regs *regs, int intnum)
struct hv_driver_cb *cb =
(struct hv_driver_cb *)him->intarg;
cb->callback(cb, him->intdata);
- __get_cpu_var(irq_stat).irq_hv_msg_count++;
+ __this_cpu_inc(irq_stat.irq_hv_msg_count);
}
}
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
index 2bf6c9c135c1..bb509cee3b59 100644
--- a/arch/tile/kernel/perf_event.c
+++ b/arch/tile/kernel/perf_event.c
@@ -590,7 +590,7 @@ static int tile_event_set_period(struct perf_event *event)
*/
static void tile_pmu_stop(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
@@ -616,7 +616,7 @@ static void tile_pmu_stop(struct perf_event *event, int flags)
*/
static void tile_pmu_start(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx = event->hw.idx;
if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
@@ -650,7 +650,7 @@ static void tile_pmu_start(struct perf_event *event, int flags)
*/
static int tile_pmu_add(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc;
unsigned long mask;
int b, max_cnt;
@@ -706,7 +706,7 @@ static int tile_pmu_add(struct perf_event *event, int flags)
*/
static void tile_pmu_del(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
/*
@@ -880,14 +880,14 @@ static struct pmu tilera_pmu = {
int tile_pmu_handle_irq(struct pt_regs *regs, int fault)
{
struct perf_sample_data data;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_event *event;
struct hw_perf_event *hwc;
u64 val;
unsigned long status;
int bit;
- __get_cpu_var(perf_irqs)++;
+ __this_cpu_inc(perf_irqs);
if (!atomic_read(&tile_active_events))
return 0;
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 16ed58948757..0050cbc1d9de 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -64,7 +64,7 @@ early_param("idle", idle_setup);
void arch_cpu_idle(void)
{
- __get_cpu_var(irq_stat).idle_timestamp = jiffies;
+ __this_cpu_write(irq_stat.idle_timestamp, jiffies);
_cpu_idle();
}
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 112ababa9e55..b9736ded06f2 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1218,7 +1218,8 @@ static void __init validate_hv(void)
* various asid variables to their appropriate initial states.
*/
asid_range = hv_inquire_asid(0);
- __get_cpu_var(current_asid) = min_asid = asid_range.start;
+ min_asid = asid_range.start;
+ __this_cpu_write(current_asid, min_asid);
max_asid = asid_range.start + asid_range.size - 1;
if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index de07fa7d1315..6cb2ce31b5a2 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -740,7 +740,7 @@ static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
- unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+ unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
struct thread_info *info = (void *)current_thread_info();
int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
@@ -766,7 +766,7 @@ void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
void single_step_once(struct pt_regs *regs)
{
- unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+ unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
*ss_pc = regs->pc;
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 01e8ab29f43a..d3c4ed780ce2 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -183,12 +183,13 @@ void flush_icache_range(unsigned long start, unsigned long end)
preempt_enable();
}
}
+EXPORT_SYMBOL(flush_icache_range);
/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
static irqreturn_t handle_reschedule_ipi(int irq, void *token)
{
- __get_cpu_var(irq_stat).irq_resched_count++;
+ __this_cpu_inc(irq_stat.irq_resched_count);
scheduler_ipi();
return IRQ_HANDLED;
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 732e9d138661..0d59a1b60c74 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -41,7 +41,7 @@ void __init smp_prepare_boot_cpu(void)
int cpu = smp_processor_id();
set_cpu_online(cpu, 1);
set_cpu_present(cpu, 1);
- __get_cpu_var(cpu_state) = CPU_ONLINE;
+ __this_cpu_write(cpu_state, CPU_ONLINE);
init_messaging();
}
@@ -158,7 +158,7 @@ static void start_secondary(void)
/* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */
/* Initialize the current asid for our first page table. */
- __get_cpu_var(current_asid) = min_asid;
+ __this_cpu_write(current_asid, min_asid);
/* Set up this thread as another owner of the init_mm */
atomic_inc(&init_mm.mm_count);
@@ -201,7 +201,7 @@ void online_secondary(void)
notify_cpu_starting(smp_processor_id());
set_cpu_online(smp_processor_id(), 1);
- __get_cpu_var(cpu_state) = CPU_ONLINE;
+ __this_cpu_write(cpu_state, CPU_ONLINE);
/* Set up tile-specific state for this cpu. */
setup_cpu(0);
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index d8fbc289e680..b854a1cd0079 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -162,7 +162,7 @@ static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
void setup_tile_timer(void)
{
- struct clock_event_device *evt = &__get_cpu_var(tile_timer);
+ struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
/* Fill in fields that are speed-specific. */
clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC);
@@ -182,7 +182,7 @@ void setup_tile_timer(void)
void do_timer_interrupt(struct pt_regs *regs, int fault_num)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- struct clock_event_device *evt = &__get_cpu_var(tile_timer);
+ struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
/*
* Mask the timer interrupt here, since we are a oneshot timer
@@ -194,7 +194,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
irq_enter();
/* Track interrupt count. */
- __get_cpu_var(irq_stat).irq_timer_count++;
+ __this_cpu_inc(irq_stat.irq_timer_count);
/* Call the generic timer handler */
evt->event_handler(evt);
@@ -235,7 +235,7 @@ cycles_t ns2cycles(unsigned long nsecs)
* We do not have to disable preemption here as each core has the same
* clock frequency.
*/
- struct clock_event_device *dev = &__raw_get_cpu_var(tile_timer);
+ struct clock_event_device *dev = raw_cpu_ptr(&tile_timer);
/*
* as in clocksource.h and x86's timer.h, we split the calculation
@@ -249,33 +249,52 @@ cycles_t ns2cycles(unsigned long nsecs)
void update_vsyscall_tz(void)
{
- /* Userspace gettimeofday will spin while this value is odd. */
- ++vdso_data->tz_update_count;
- smp_wmb();
+ write_seqcount_begin(&vdso_data->tz_seq);
vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
vdso_data->tz_dsttime = sys_tz.tz_dsttime;
- smp_wmb();
- ++vdso_data->tz_update_count;
+ write_seqcount_end(&vdso_data->tz_seq);
}
void update_vsyscall(struct timekeeper *tk)
{
- struct timespec *wtm = &tk->wall_to_monotonic;
- struct clocksource *clock = tk->tkr.clock;
-
- if (clock != &cycle_counter_cs)
+ if (tk->tkr.clock != &cycle_counter_cs)
return;
- /* Userspace gettimeofday will spin while this value is odd. */
- ++vdso_data->tb_update_count;
- smp_wmb();
- vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
- vdso_data->xtime_clock_sec = tk->xtime_sec;
- vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
- vdso_data->wtom_clock_sec = wtm->tv_sec;
- vdso_data->wtom_clock_nsec = wtm->tv_nsec;
- vdso_data->mult = tk->tkr.mult;
- vdso_data->shift = tk->tkr.shift;
- smp_wmb();
- ++vdso_data->tb_update_count;
+ write_seqcount_begin(&vdso_data->tb_seq);
+
+ vdso_data->cycle_last = tk->tkr.cycle_last;
+ vdso_data->mask = tk->tkr.mask;
+ vdso_data->mult = tk->tkr.mult;
+ vdso_data->shift = tk->tkr.shift;
+
+ vdso_data->wall_time_sec = tk->xtime_sec;
+ vdso_data->wall_time_snsec = tk->tkr.xtime_nsec;
+
+ vdso_data->monotonic_time_sec = tk->xtime_sec
+ + tk->wall_to_monotonic.tv_sec;
+ vdso_data->monotonic_time_snsec = tk->tkr.xtime_nsec
+ + ((u64)tk->wall_to_monotonic.tv_nsec
+ << tk->tkr.shift);
+ while (vdso_data->monotonic_time_snsec >=
+ (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
+ vdso_data->monotonic_time_snsec -=
+ ((u64)NSEC_PER_SEC) << tk->tkr.shift;
+ vdso_data->monotonic_time_sec++;
+ }
+
+ vdso_data->wall_time_coarse_sec = tk->xtime_sec;
+ vdso_data->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
+ tk->tkr.shift);
+
+ vdso_data->monotonic_time_coarse_sec =
+ vdso_data->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
+ vdso_data->monotonic_time_coarse_nsec =
+ vdso_data->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
+
+ while (vdso_data->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
+ vdso_data->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
+ vdso_data->monotonic_time_coarse_sec++;
+ }
+
+ write_seqcount_end(&vdso_data->tb_seq);
}
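
Because the vDSO data no longer carries wall_to_monotonic separately, update_vsyscall() folds it into the monotonic fields at update time, and the while loops carry any overflow of the nanosecond sums into the seconds fields (tv_nsec is always below NSEC_PER_SEC, so each loop runs at most a couple of times). A toy standalone check of the shifted-nanosecond normalization, with invented values:

	#include <stdio.h>
	#include <stdint.h>

	#define NSEC_PER_SEC 1000000000ULL

	int main(void)
	{
		uint32_t shift = 8;				/* toy value */
		uint64_t snsec = (900000000ULL << shift)	/* 0.9 s, shifted */
			       + (300000000ULL << shift);	/* + 0.3 s wtom */
		uint64_t sec = 0;

		while (snsec >= (NSEC_PER_SEC << shift)) {
			snsec -= NSEC_PER_SEC << shift;
			sec++;
		}

		/* 0.9 s + 0.3 s = 1.2 s -> sec == 1, remainder 0.2 s */
		printf("sec=%llu nsec=%llu\n",
		       (unsigned long long)sec,
		       (unsigned long long)(snsec >> shift));
		return 0;
	}
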
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index f3ceb6308e42..86900ccd4977 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -277,7 +277,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
if (fixup_exception(regs)) /* ILL_TRANS or UNALIGN_DATA */
return;
if (fault_num >= 0 &&
- fault_num < sizeof(int_name)/sizeof(int_name[0]) &&
+ fault_num < ARRAY_SIZE(int_name) &&
int_name[fault_num] != NULL)
name = int_name[fault_num];
else
diff --git a/arch/tile/kernel/vdso/vdso.lds.S b/arch/tile/kernel/vdso/vdso.lds.S
index 041cd6c39c83..731529f3f06f 100644
--- a/arch/tile/kernel/vdso/vdso.lds.S
+++ b/arch/tile/kernel/vdso/vdso.lds.S
@@ -82,6 +82,8 @@ VERSION
__vdso_rt_sigreturn;
__vdso_gettimeofday;
gettimeofday;
+ __vdso_clock_gettime;
+ clock_gettime;
local:*;
};
}
diff --git a/arch/tile/kernel/vdso/vgettimeofday.c b/arch/tile/kernel/vdso/vgettimeofday.c
index e933fb9fbf5c..8bb21eda07d8 100644
--- a/arch/tile/kernel/vdso/vgettimeofday.c
+++ b/arch/tile/kernel/vdso/vgettimeofday.c
@@ -15,6 +15,7 @@
#define VDSO_BUILD /* avoid some shift warnings for -m32 in <asm/page.h> */
#include <linux/time.h>
#include <asm/timex.h>
+#include <asm/unistd.h>
#include <asm/vdso.h>
#if CHIP_HAS_SPLIT_CYCLE()
@@ -35,6 +36,11 @@ static inline cycles_t get_cycles_inline(void)
#define get_cycles get_cycles_inline
#endif
+struct syscall_return_value {
+ long value;
+ long error;
+};
+
/*
* Find out the vDSO data page address in the process address space.
*/
@@ -50,59 +56,143 @@ inline unsigned long get_datapage(void)
return ret;
}
-int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+static inline u64 vgetsns(struct vdso_data *vdso)
+{
+ return ((get_cycles() - vdso->cycle_last) & vdso->mask) * vdso->mult;
+}
+
+static inline int do_realtime(struct vdso_data *vdso, struct timespec *ts)
+{
+ unsigned count;
+ u64 ns;
+
+ do {
+ count = read_seqcount_begin(&vdso->tb_seq);
+ ts->tv_sec = vdso->wall_time_sec;
+ ns = vdso->wall_time_snsec;
+ ns += vgetsns(vdso);
+ ns >>= vdso->shift;
+ } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
+ return 0;
+}
+
+static inline int do_monotonic(struct vdso_data *vdso, struct timespec *ts)
+{
+ unsigned count;
+ u64 ns;
+
+ do {
+ count = read_seqcount_begin(&vdso->tb_seq);
+ ts->tv_sec = vdso->monotonic_time_sec;
+ ns = vdso->monotonic_time_snsec;
+ ns += vgetsns(vdso);
+ ns >>= vdso->shift;
+ } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
+ return 0;
+}
+
+static inline int do_realtime_coarse(struct vdso_data *vdso,
+ struct timespec *ts)
+{
+ unsigned count;
+
+ do {
+ count = read_seqcount_begin(&vdso->tb_seq);
+ ts->tv_sec = vdso->wall_time_coarse_sec;
+ ts->tv_nsec = vdso->wall_time_coarse_nsec;
+ } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+
+ return 0;
+}
+
+static inline int do_monotonic_coarse(struct vdso_data *vdso,
+ struct timespec *ts)
{
- cycles_t cycles;
- unsigned long count, sec, ns;
- volatile struct vdso_data *vdso_data;
+ unsigned count;
+
+ do {
+ count = read_seqcount_begin(&vdso->tb_seq);
+ ts->tv_sec = vdso->monotonic_time_coarse_sec;
+ ts->tv_nsec = vdso->monotonic_time_coarse_nsec;
+ } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+
+ return 0;
+}
+
+struct syscall_return_value __vdso_gettimeofday(struct timeval *tv,
+ struct timezone *tz)
+{
+ struct syscall_return_value ret = { 0, 0 };
+ unsigned count;
+ struct vdso_data *vdso = (struct vdso_data *)get_datapage();
- vdso_data = (struct vdso_data *)get_datapage();
/* The use of the timezone is obsolete, normally tz is NULL. */
if (unlikely(tz != NULL)) {
- while (1) {
- /* Spin until the update finish. */
- count = vdso_data->tz_update_count;
- if (count & 1)
- continue;
-
- tz->tz_minuteswest = vdso_data->tz_minuteswest;
- tz->tz_dsttime = vdso_data->tz_dsttime;
-
- /* Check whether updated, read again if so. */
- if (count == vdso_data->tz_update_count)
- break;
- }
+ do {
+ count = read_seqcount_begin(&vdso->tz_seq);
+ tz->tz_minuteswest = vdso->tz_minuteswest;
+ tz->tz_dsttime = vdso->tz_dsttime;
+ } while (unlikely(read_seqcount_retry(&vdso->tz_seq, count)));
}
if (unlikely(tv == NULL))
- return 0;
-
- while (1) {
- /* Spin until the update finish. */
- count = vdso_data->tb_update_count;
- if (count & 1)
- continue;
-
- sec = vdso_data->xtime_clock_sec;
- cycles = get_cycles() - vdso_data->xtime_tod_stamp;
- ns = (cycles * vdso_data->mult) + vdso_data->xtime_clock_nsec;
- ns >>= vdso_data->shift;
-
- if (ns >= NSEC_PER_SEC) {
- ns -= NSEC_PER_SEC;
- sec += 1;
- }
-
- /* Check whether updated, read again if so. */
- if (count == vdso_data->tb_update_count)
- break;
- }
+ return ret;
- tv->tv_sec = sec;
- tv->tv_usec = ns / 1000;
+ do_realtime(vdso, (struct timespec *)tv);
+ tv->tv_usec /= 1000;
- return 0;
+ return ret;
}
int gettimeofday(struct timeval *tv, struct timezone *tz)
__attribute__((weak, alias("__vdso_gettimeofday")));
+
+static struct syscall_return_value vdso_fallback_gettime(long clock,
+ struct timespec *ts)
+{
+ struct syscall_return_value ret;
+ __asm__ __volatile__ (
+ "swint1"
+ : "=R00" (ret.value), "=R01" (ret.error)
+ : "R10" (__NR_clock_gettime), "R00" (clock), "R01" (ts)
+ : "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "memory");
+ return ret;
+}
+
+struct syscall_return_value __vdso_clock_gettime(clockid_t clock,
+ struct timespec *ts)
+{
+ struct vdso_data *vdso = (struct vdso_data *)get_datapage();
+ struct syscall_return_value ret = { 0, 0 };
+
+ switch (clock) {
+ case CLOCK_REALTIME:
+ do_realtime(vdso, ts);
+ return ret;
+ case CLOCK_MONOTONIC:
+ do_monotonic(vdso, ts);
+ return ret;
+ case CLOCK_REALTIME_COARSE:
+ do_realtime_coarse(vdso, ts);
+ return ret;
+ case CLOCK_MONOTONIC_COARSE:
+ do_monotonic_coarse(vdso, ts);
+ return ret;
+ default:
+ return vdso_fallback_gettime(clock, ts);
+ }
+}
+
+int clock_gettime(clockid_t clock, struct timespec *ts)
+ __attribute__((weak, alias("__vdso_clock_gettime")));
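
With the symbols exported from vdso.lds.S earlier in this diff, CLOCK_REALTIME, CLOCK_MONOTONIC and their _COARSE variants now complete entirely in userspace; any other clock id falls through to the swint1 syscall in vdso_fallback_gettime(), so clocks like CLOCK_THREAD_CPUTIME_ID keep their kernel semantics. An ordinary libc-routed caller exercises the new path with no source changes; a trivial example:

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;

		/* dispatched through the vDSO when the symbol is present,
		   without entering the kernel */
		if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
			printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}
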
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index f1819423ffc9..0e059a0101ea 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -66,11 +66,9 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
- VMLINUX_SYMBOL(_sinitdata) = .;
INIT_DATA_SECTION(16) :data =0
PERCPU_SECTION(L2_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
- VMLINUX_SYMBOL(_einitdata) = .;
__init_end = .;
_sdata = .; /* Start of data section */
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 0dc218294770..6aa2f2625447 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -103,7 +103,7 @@ static void kmap_atomic_register(struct page *page, int type,
spin_lock(&amp_lock);
/* With interrupts disabled, now fill in the per-cpu info. */
- amp = &__get_cpu_var(amps).per_type[type];
+ amp = this_cpu_ptr(&amps.per_type[type]);
amp->page = page;
amp->cpu = smp_processor_id();
amp->va = va;
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index bfb3127b4df9..caa270165f86 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -254,8 +254,8 @@ static pgprot_t __init init_pgprot(ulong address)
* Everything else that isn't data or bss is heap, so mark it
* with the initial heap home (hash-for-home, or this cpu). This
* includes any addresses after the loaded image and any address before
- * _einitdata, since we already captured the case of text before
- * _sinittext, and __pa(einittext) is approximately __pa(sinitdata).
+ * __init_end, since we already captured the case of text before
+ * _sinittext, and __pa(einittext) is approximately __pa(__init_begin).
*
* All the LOWMEM pages that we mark this way will get their
* struct page homecache properly marked later, in set_page_homes().
@@ -263,7 +263,7 @@ static pgprot_t __init init_pgprot(ulong address)
* homes, but with a zero free_time we don't have to actually
* do a flush action the first time we use them, either.
*/
- if (address >= (ulong) _end || address < (ulong) _einitdata)
+ if (address >= (ulong) _end || address < (ulong) __init_end)
return construct_pgprot(PAGE_KERNEL, initial_heap_home());
/* Use hash-for-home if requested for data/bss. */
@@ -593,14 +593,14 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
interrupt_mask_set_mask(-1ULL);
rc = flush_and_install_context(__pa(pgtables),
init_pgprot((unsigned long)pgtables),
- __get_cpu_var(current_asid),
+ __this_cpu_read(current_asid),
cpumask_bits(my_cpu_mask));
interrupt_mask_restore_mask(irqmask);
BUG_ON(rc != 0);
/* Copy the page table back to the normal swapper_pg_dir. */
memcpy(pgd_base, pgtables, sizeof(pgtables));
- __install_page_table(pgd_base, __get_cpu_var(current_asid),
+ __install_page_table(pgd_base, __this_cpu_read(current_asid),
swapper_pgprot);
/*
@@ -632,7 +632,7 @@ int devmem_is_allowed(unsigned long pagenr)
{
return pagenr < kaddr_to_pfn(_end) &&
!(pagenr >= kaddr_to_pfn(&init_thread_union) ||
- pagenr < kaddr_to_pfn(_einitdata)) &&
+ pagenr < kaddr_to_pfn(__init_end)) &&
!(pagenr >= kaddr_to_pfn(_sinittext) ||
pagenr <= kaddr_to_pfn(_einittext-1));
}
@@ -975,8 +975,8 @@ void free_initmem(void)
/* Free the data pages that we won't use again after init. */
free_init_pages("unused kernel data",
- (unsigned long)_sinitdata,
- (unsigned long)_einitdata);
+ (unsigned long)__init_begin,
+ (unsigned long)__init_end);
/*
* Free the pages mapped from 0xc0000000 that correspond to code