Diffstat (limited to 'arch/tile/kernel')
 -rw-r--r--  arch/tile/kernel/irq.c                 |  14
 -rw-r--r--  arch/tile/kernel/messaging.c           |   4
 -rw-r--r--  arch/tile/kernel/perf_event.c          |  12
 -rw-r--r--  arch/tile/kernel/process.c             |   2
 -rw-r--r--  arch/tile/kernel/setup.c               |   3
 -rw-r--r--  arch/tile/kernel/single_step.c         |   4
 -rw-r--r--  arch/tile/kernel/smp.c                 |   3
 -rw-r--r--  arch/tile/kernel/smpboot.c             |   6
 -rw-r--r--  arch/tile/kernel/time.c                |  69
 -rw-r--r--  arch/tile/kernel/traps.c               |   2
 -rw-r--r--  arch/tile/kernel/vdso/vdso.lds.S       |   2
 -rw-r--r--  arch/tile/kernel/vdso/vgettimeofday.c  | 176
 -rw-r--r--  arch/tile/kernel/vmlinux.lds.S         |   2
13 files changed, 205 insertions(+), 94 deletions(-)
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index 637f2ffaa5f5..ba85765e1436 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -73,7 +73,7 @@ static DEFINE_PER_CPU(int, irq_depth);
*/
void tile_dev_intr(struct pt_regs *regs, int intnum)
{
- int depth = __get_cpu_var(irq_depth)++;
+ int depth = __this_cpu_inc_return(irq_depth);
unsigned long original_irqs;
unsigned long remaining_irqs;
struct pt_regs *old_regs;
@@ -120,7 +120,7 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
/* Count device irqs; Linux IPIs are counted elsewhere. */
if (irq != IRQ_RESCHEDULE)
- __get_cpu_var(irq_stat).irq_dev_intr_count++;
+ __this_cpu_inc(irq_stat.irq_dev_intr_count);
generic_handle_irq(irq);
}
@@ -130,10 +130,10 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
* including any that were reenabled during interrupt
* handling.
*/
- if (depth == 0)
- unmask_irqs(~__get_cpu_var(irq_disable_mask));
+ if (depth == 1)
+ unmask_irqs(~__this_cpu_read(irq_disable_mask));
- __get_cpu_var(irq_depth)--;
+ __this_cpu_dec(irq_depth);
/*
* Track time spent against the current process again and
@@ -151,7 +151,7 @@ void tile_dev_intr(struct pt_regs *regs, int intnum)
static void tile_irq_chip_enable(struct irq_data *d)
{
get_cpu_var(irq_disable_mask) &= ~(1UL << d->irq);
- if (__get_cpu_var(irq_depth) == 0)
+ if (__this_cpu_read(irq_depth) == 0)
unmask_irqs(1UL << d->irq);
put_cpu_var(irq_disable_mask);
}
@@ -197,7 +197,7 @@ static void tile_irq_chip_ack(struct irq_data *d)
*/
static void tile_irq_chip_eoi(struct irq_data *d)
{
- if (!(__get_cpu_var(irq_disable_mask) & (1UL << d->irq)))
+ if (!(__this_cpu_read(irq_disable_mask) & (1UL << d->irq)))
unmask_irqs(1UL << d->irq);
}
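
The irq.c hunks show the accessor conversion repeated throughout this series:
value reads and read-modify-write sequences on per-cpu data move from the
deprecated __get_cpu_var() macro to the this_cpu_* operations
(__this_cpu_read/_write/_inc/_dec), and address-taking moves from
&__get_cpu_var(x) to this_cpu_ptr(&x) (raw_cpu_ptr() for the
__raw_get_cpu_var() case in time.c). One subtlety is worth calling out:
__get_cpu_var(irq_depth)++ post-increments, so depth held the old value,
whereas __this_cpu_inc_return() returns the new value; the outermost-entry
test therefore changes from depth == 0 to depth == 1. A minimal sketch of the
equivalence, using a hypothetical counter nest_demo:

	static DEFINE_PER_CPU(int, nest_demo);

	static void enter_old(void)	/* old style (removed API) */
	{
		int depth = __get_cpu_var(nest_demo)++;	/* yields old value */
		if (depth == 0)
			;	/* outermost entry */
	}

	static void enter_new(void)	/* new style */
	{
		int depth = __this_cpu_inc_return(nest_demo);	/* new value */
		if (depth == 1)
			;	/* outermost entry */
	}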
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 7867266f9716..ac950be1318e 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -28,7 +28,7 @@ static DEFINE_PER_CPU(HV_MsgState, msg_state);
void init_messaging(void)
{
/* Allocate storage for messages in kernel space */
- HV_MsgState *state = &__get_cpu_var(msg_state);
+ HV_MsgState *state = this_cpu_ptr(&msg_state);
int rc = hv_register_message_state(state);
if (rc != HV_OK)
panic("hv_register_message_state: error %d", rc);
@@ -96,7 +96,7 @@ void hv_message_intr(struct pt_regs *regs, int intnum)
struct hv_driver_cb *cb =
(struct hv_driver_cb *)him->intarg;
cb->callback(cb, him->intdata);
- __get_cpu_var(irq_stat).irq_hv_msg_count++;
+ __this_cpu_inc(irq_stat.irq_hv_msg_count);
}
}
diff --git a/arch/tile/kernel/perf_event.c b/arch/tile/kernel/perf_event.c
index 2bf6c9c135c1..bb509cee3b59 100644
--- a/arch/tile/kernel/perf_event.c
+++ b/arch/tile/kernel/perf_event.c
@@ -590,7 +590,7 @@ static int tile_event_set_period(struct perf_event *event)
*/
static void tile_pmu_stop(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;
@@ -616,7 +616,7 @@ static void tile_pmu_stop(struct perf_event *event, int flags)
*/
static void tile_pmu_start(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int idx = event->hw.idx;
if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
@@ -650,7 +650,7 @@ static void tile_pmu_start(struct perf_event *event, int flags)
*/
static int tile_pmu_add(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct hw_perf_event *hwc;
unsigned long mask;
int b, max_cnt;
@@ -706,7 +706,7 @@ static int tile_pmu_add(struct perf_event *event, int flags)
*/
static void tile_pmu_del(struct perf_event *event, int flags)
{
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
int i;
/*
@@ -880,14 +880,14 @@ static struct pmu tilera_pmu = {
int tile_pmu_handle_irq(struct pt_regs *regs, int fault)
{
struct perf_sample_data data;
- struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
struct perf_event *event;
struct hw_perf_event *hwc;
u64 val;
unsigned long status;
int bit;
- __get_cpu_var(perf_irqs)++;
+ __this_cpu_inc(perf_irqs);
if (!atomic_read(&tile_active_events))
return 0;
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index 16ed58948757..0050cbc1d9de 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -64,7 +64,7 @@ early_param("idle", idle_setup);
void arch_cpu_idle(void)
{
- __get_cpu_var(irq_stat).idle_timestamp = jiffies;
+ __this_cpu_write(irq_stat.idle_timestamp, jiffies);
_cpu_idle();
}
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 112ababa9e55..b9736ded06f2 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1218,7 +1218,8 @@ static void __init validate_hv(void)
* various asid variables to their appropriate initial states.
*/
asid_range = hv_inquire_asid(0);
- __get_cpu_var(current_asid) = min_asid = asid_range.start;
+ min_asid = asid_range.start;
+ __this_cpu_write(current_asid, min_asid);
max_asid = asid_range.start + asid_range.size - 1;
if (hv_confstr(HV_CONFSTR_CHIP_MODEL, (HV_VirtAddr)chip_model,
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index de07fa7d1315..6cb2ce31b5a2 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -740,7 +740,7 @@ static DEFINE_PER_CPU(unsigned long, ss_saved_pc);
void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
{
- unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+ unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
struct thread_info *info = (void *)current_thread_info();
int is_single_step = test_ti_thread_flag(info, TIF_SINGLESTEP);
unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
@@ -766,7 +766,7 @@ void gx_singlestep_handle(struct pt_regs *regs, int fault_num)
void single_step_once(struct pt_regs *regs)
{
- unsigned long *ss_pc = &__get_cpu_var(ss_saved_pc);
+ unsigned long *ss_pc = this_cpu_ptr(&ss_saved_pc);
unsigned long control = __insn_mfspr(SPR_SINGLE_STEP_CONTROL_K);
*ss_pc = regs->pc;
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 01e8ab29f43a..d3c4ed780ce2 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -183,12 +183,13 @@ void flush_icache_range(unsigned long start, unsigned long end)
preempt_enable();
}
}
+EXPORT_SYMBOL(flush_icache_range);
/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
static irqreturn_t handle_reschedule_ipi(int irq, void *token)
{
- __get_cpu_var(irq_stat).irq_resched_count++;
+ __this_cpu_inc(irq_stat.irq_resched_count);
scheduler_ipi();
return IRQ_HANDLED;
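
Besides the per-cpu conversion, smp.c gains an EXPORT_SYMBOL() for
flush_icache_range(), making it callable from modules. A hedged usage sketch,
assuming a module that has just written instructions into a buffer it intends
to execute (buf and len are hypothetical):

	/* Make freshly written text visible to the instruction cache. */
	flush_icache_range((unsigned long)buf, (unsigned long)buf + len);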
diff --git a/arch/tile/kernel/smpboot.c b/arch/tile/kernel/smpboot.c
index 732e9d138661..0d59a1b60c74 100644
--- a/arch/tile/kernel/smpboot.c
+++ b/arch/tile/kernel/smpboot.c
@@ -41,7 +41,7 @@ void __init smp_prepare_boot_cpu(void)
int cpu = smp_processor_id();
set_cpu_online(cpu, 1);
set_cpu_present(cpu, 1);
- __get_cpu_var(cpu_state) = CPU_ONLINE;
+ __this_cpu_write(cpu_state, CPU_ONLINE);
init_messaging();
}
@@ -158,7 +158,7 @@ static void start_secondary(void)
/* printk(KERN_DEBUG "Initializing CPU#%d\n", cpuid); */
/* Initialize the current asid for our first page table. */
- __get_cpu_var(current_asid) = min_asid;
+ __this_cpu_write(current_asid, min_asid);
/* Set up this thread as another owner of the init_mm */
atomic_inc(&init_mm.mm_count);
@@ -201,7 +201,7 @@ void online_secondary(void)
notify_cpu_starting(smp_processor_id());
set_cpu_online(smp_processor_id(), 1);
- __get_cpu_var(cpu_state) = CPU_ONLINE;
+ __this_cpu_write(cpu_state, CPU_ONLINE);
/* Set up tile-specific state for this cpu. */
setup_cpu(0);
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index d8fbc289e680..b854a1cd0079 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -162,7 +162,7 @@ static DEFINE_PER_CPU(struct clock_event_device, tile_timer) = {
void setup_tile_timer(void)
{
- struct clock_event_device *evt = &__get_cpu_var(tile_timer);
+ struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
/* Fill in fields that are speed-specific. */
clockevents_calc_mult_shift(evt, cycles_per_sec, TILE_MINSEC);
@@ -182,7 +182,7 @@ void setup_tile_timer(void)
void do_timer_interrupt(struct pt_regs *regs, int fault_num)
{
struct pt_regs *old_regs = set_irq_regs(regs);
- struct clock_event_device *evt = &__get_cpu_var(tile_timer);
+ struct clock_event_device *evt = this_cpu_ptr(&tile_timer);
/*
* Mask the timer interrupt here, since we are a oneshot timer
@@ -194,7 +194,7 @@ void do_timer_interrupt(struct pt_regs *regs, int fault_num)
irq_enter();
/* Track interrupt count. */
- __get_cpu_var(irq_stat).irq_timer_count++;
+ __this_cpu_inc(irq_stat.irq_timer_count);
/* Call the generic timer handler */
evt->event_handler(evt);
@@ -235,7 +235,7 @@ cycles_t ns2cycles(unsigned long nsecs)
* We do not have to disable preemption here as each core has the same
* clock frequency.
*/
- struct clock_event_device *dev = &__raw_get_cpu_var(tile_timer);
+ struct clock_event_device *dev = raw_cpu_ptr(&tile_timer);
/*
* as in clocksource.h and x86's timer.h, we split the calculation
@@ -249,33 +249,52 @@ cycles_t ns2cycles(unsigned long nsecs)
void update_vsyscall_tz(void)
{
- /* Userspace gettimeofday will spin while this value is odd. */
- ++vdso_data->tz_update_count;
- smp_wmb();
+ write_seqcount_begin(&vdso_data->tz_seq);
vdso_data->tz_minuteswest = sys_tz.tz_minuteswest;
vdso_data->tz_dsttime = sys_tz.tz_dsttime;
- smp_wmb();
- ++vdso_data->tz_update_count;
+ write_seqcount_end(&vdso_data->tz_seq);
}
void update_vsyscall(struct timekeeper *tk)
{
- struct timespec *wtm = &tk->wall_to_monotonic;
- struct clocksource *clock = tk->tkr.clock;
-
- if (clock != &cycle_counter_cs)
+ if (tk->tkr.clock != &cycle_counter_cs)
return;
- /* Userspace gettimeofday will spin while this value is odd. */
- ++vdso_data->tb_update_count;
- smp_wmb();
- vdso_data->xtime_tod_stamp = tk->tkr.cycle_last;
- vdso_data->xtime_clock_sec = tk->xtime_sec;
- vdso_data->xtime_clock_nsec = tk->tkr.xtime_nsec;
- vdso_data->wtom_clock_sec = wtm->tv_sec;
- vdso_data->wtom_clock_nsec = wtm->tv_nsec;
- vdso_data->mult = tk->tkr.mult;
- vdso_data->shift = tk->tkr.shift;
- smp_wmb();
- ++vdso_data->tb_update_count;
+ write_seqcount_begin(&vdso_data->tb_seq);
+
+ vdso_data->cycle_last = tk->tkr.cycle_last;
+ vdso_data->mask = tk->tkr.mask;
+ vdso_data->mult = tk->tkr.mult;
+ vdso_data->shift = tk->tkr.shift;
+
+ vdso_data->wall_time_sec = tk->xtime_sec;
+ vdso_data->wall_time_snsec = tk->tkr.xtime_nsec;
+
+ vdso_data->monotonic_time_sec = tk->xtime_sec
+ + tk->wall_to_monotonic.tv_sec;
+ vdso_data->monotonic_time_snsec = tk->tkr.xtime_nsec
+ + ((u64)tk->wall_to_monotonic.tv_nsec
+ << tk->tkr.shift);
+ while (vdso_data->monotonic_time_snsec >=
+ (((u64)NSEC_PER_SEC) << tk->tkr.shift)) {
+ vdso_data->monotonic_time_snsec -=
+ ((u64)NSEC_PER_SEC) << tk->tkr.shift;
+ vdso_data->monotonic_time_sec++;
+ }
+
+ vdso_data->wall_time_coarse_sec = tk->xtime_sec;
+ vdso_data->wall_time_coarse_nsec = (long)(tk->tkr.xtime_nsec >>
+ tk->tkr.shift);
+
+ vdso_data->monotonic_time_coarse_sec =
+ vdso_data->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
+ vdso_data->monotonic_time_coarse_nsec =
+ vdso_data->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
+
+ while (vdso_data->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
+ vdso_data->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
+ vdso_data->monotonic_time_coarse_sec++;
+ }
+
+ write_seqcount_end(&vdso_data->tb_seq);
}
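
Two things change in the time.c update path. First, the hand-rolled odd/even
counters (tz_update_count/tb_update_count, bracketed by explicit smp_wmb()
barriers) are replaced by seqcount_t fields driven through
write_seqcount_begin()/write_seqcount_end(), which implement the same
odd-while-updating protocol and supply the barriers. Second, vdso_data now
publishes the timekeeper snapshot in the pre-shifted "snsec" form:
nanoseconds stored left-shifted by tk->tkr.shift, so a reader can compute
(snsec + cycles * mult) >> shift in one step. The while loop keeps the
shifted value normalized below one shifted second; a sketch of that
arithmetic with stand-in variables:

	/* One second in the shifted representation. */
	u64 one_sec = (u64)NSEC_PER_SEC << shift;

	snsec += (u64)wtm_nsec << shift;	/* add wall_to_monotonic */
	while (snsec >= one_sec) {		/* renormalize */
		snsec -= one_sec;
		sec++;
	}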
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index f3ceb6308e42..86900ccd4977 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -277,7 +277,7 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
if (fixup_exception(regs)) /* ILL_TRANS or UNALIGN_DATA */
return;
if (fault_num >= 0 &&
- fault_num < sizeof(int_name)/sizeof(int_name[0]) &&
+ fault_num < ARRAY_SIZE(int_name) &&
int_name[fault_num] != NULL)
name = int_name[fault_num];
else
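
The traps.c change swaps the open-coded element count for ARRAY_SIZE() from
<linux/kernel.h>, which is the same expression plus a compile-time check that
the argument is a real array rather than a pointer:

	#define ARRAY_SIZE(arr) \
		(sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))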
diff --git a/arch/tile/kernel/vdso/vdso.lds.S b/arch/tile/kernel/vdso/vdso.lds.S
index 041cd6c39c83..731529f3f06f 100644
--- a/arch/tile/kernel/vdso/vdso.lds.S
+++ b/arch/tile/kernel/vdso/vdso.lds.S
@@ -82,6 +82,8 @@ VERSION
__vdso_rt_sigreturn;
__vdso_gettimeofday;
gettimeofday;
+ __vdso_clock_gettime;
+ clock_gettime;
local:*;
};
}
diff --git a/arch/tile/kernel/vdso/vgettimeofday.c b/arch/tile/kernel/vdso/vgettimeofday.c
index e933fb9fbf5c..8bb21eda07d8 100644
--- a/arch/tile/kernel/vdso/vgettimeofday.c
+++ b/arch/tile/kernel/vdso/vgettimeofday.c
@@ -15,6 +15,7 @@
#define VDSO_BUILD /* avoid some shift warnings for -m32 in <asm/page.h> */
#include <linux/time.h>
#include <asm/timex.h>
+#include <asm/unistd.h>
#include <asm/vdso.h>
#if CHIP_HAS_SPLIT_CYCLE()
@@ -35,6 +36,11 @@ static inline cycles_t get_cycles_inline(void)
#define get_cycles get_cycles_inline
#endif
+struct syscall_return_value {
+ long value;
+ long error;
+};
+
/*
* Find out the vDSO data page address in the process address space.
*/
@@ -50,59 +56,143 @@ inline unsigned long get_datapage(void)
return ret;
}
-int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+static inline u64 vgetsns(struct vdso_data *vdso)
+{
+ return ((get_cycles() - vdso->cycle_last) & vdso->mask) * vdso->mult;
+}
+
+static inline int do_realtime(struct vdso_data *vdso, struct timespec *ts)
+{
+ unsigned count;
+ u64 ns;
+
+ do {
+ count = read_seqcount_begin(&vdso->tb_seq);
+ ts->tv_sec = vdso->wall_time_sec;
+ ns = vdso->wall_time_snsec;
+ ns += vgetsns(vdso);
+ ns >>= vdso->shift;
+ } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
+ return 0;
+}
+
+static inline int do_monotonic(struct vdso_data *vdso, struct timespec *ts)
+{
+ unsigned count;
+ u64 ns;
+
+ do {
+ count = read_seqcount_begin(&vdso->tb_seq);
+ ts->tv_sec = vdso->monotonic_time_sec;
+ ns = vdso->monotonic_time_snsec;
+ ns += vgetsns(vdso);
+ ns >>= vdso->shift;
+ } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+
+ ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+ ts->tv_nsec = ns;
+
+ return 0;
+}
+
+static inline int do_realtime_coarse(struct vdso_data *vdso,
+ struct timespec *ts)
+{
+ unsigned count;
+
+ do {
+ count = read_seqcount_begin(&vdso->tb_seq);
+ ts->tv_sec = vdso->wall_time_coarse_sec;
+ ts->tv_nsec = vdso->wall_time_coarse_nsec;
+ } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+
+ return 0;
+}
+
+static inline int do_monotonic_coarse(struct vdso_data *vdso,
+ struct timespec *ts)
{
- cycles_t cycles;
- unsigned long count, sec, ns;
- volatile struct vdso_data *vdso_data;
+ unsigned count;
+
+ do {
+ count = read_seqcount_begin(&vdso->tb_seq);
+ ts->tv_sec = vdso->monotonic_time_coarse_sec;
+ ts->tv_nsec = vdso->monotonic_time_coarse_nsec;
+ } while (unlikely(read_seqcount_retry(&vdso->tb_seq, count)));
+
+ return 0;
+}
+
+struct syscall_return_value __vdso_gettimeofday(struct timeval *tv,
+ struct timezone *tz)
+{
+ struct syscall_return_value ret = { 0, 0 };
+ unsigned count;
+ struct vdso_data *vdso = (struct vdso_data *)get_datapage();
- vdso_data = (struct vdso_data *)get_datapage();
/* The use of the timezone is obsolete, normally tz is NULL. */
if (unlikely(tz != NULL)) {
- while (1) {
- /* Spin until the update finish. */
- count = vdso_data->tz_update_count;
- if (count & 1)
- continue;
-
- tz->tz_minuteswest = vdso_data->tz_minuteswest;
- tz->tz_dsttime = vdso_data->tz_dsttime;
-
- /* Check whether updated, read again if so. */
- if (count == vdso_data->tz_update_count)
- break;
- }
+ do {
+ count = read_seqcount_begin(&vdso->tz_seq);
+ tz->tz_minuteswest = vdso->tz_minuteswest;
+ tz->tz_dsttime = vdso->tz_dsttime;
+ } while (unlikely(read_seqcount_retry(&vdso->tz_seq, count)));
}
if (unlikely(tv == NULL))
- return 0;
-
- while (1) {
- /* Spin until the update finish. */
- count = vdso_data->tb_update_count;
- if (count & 1)
- continue;
-
- sec = vdso_data->xtime_clock_sec;
- cycles = get_cycles() - vdso_data->xtime_tod_stamp;
- ns = (cycles * vdso_data->mult) + vdso_data->xtime_clock_nsec;
- ns >>= vdso_data->shift;
-
- if (ns >= NSEC_PER_SEC) {
- ns -= NSEC_PER_SEC;
- sec += 1;
- }
-
- /* Check whether updated, read again if so. */
- if (count == vdso_data->tb_update_count)
- break;
- }
+ return ret;
- tv->tv_sec = sec;
- tv->tv_usec = ns / 1000;
+ do_realtime(vdso, (struct timespec *)tv);
+ tv->tv_usec /= 1000;
- return 0;
+ return ret;
}
int gettimeofday(struct timeval *tv, struct timezone *tz)
__attribute__((weak, alias("__vdso_gettimeofday")));
+
+static struct syscall_return_value vdso_fallback_gettime(long clock,
+ struct timespec *ts)
+{
+ struct syscall_return_value ret;
+ __asm__ __volatile__ (
+ "swint1"
+ : "=R00" (ret.value), "=R01" (ret.error)
+ : "R10" (__NR_clock_gettime), "R00" (clock), "R01" (ts)
+ : "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "r29", "memory");
+ return ret;
+}
+
+struct syscall_return_value __vdso_clock_gettime(clockid_t clock,
+ struct timespec *ts)
+{
+ struct vdso_data *vdso = (struct vdso_data *)get_datapage();
+ struct syscall_return_value ret = { 0, 0 };
+
+ switch (clock) {
+ case CLOCK_REALTIME:
+ do_realtime(vdso, ts);
+ return ret;
+ case CLOCK_MONOTONIC:
+ do_monotonic(vdso, ts);
+ return ret;
+ case CLOCK_REALTIME_COARSE:
+ do_realtime_coarse(vdso, ts);
+ return ret;
+ case CLOCK_MONOTONIC_COARSE:
+ do_monotonic_coarse(vdso, ts);
+ return ret;
+ default:
+ return vdso_fallback_gettime(clock, ts);
+ }
+}
+
+int clock_gettime(clockid_t clock, struct timespec *ts)
+ __attribute__((weak, alias("__vdso_clock_gettime")));
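
The rewritten vDSO mirrors the kernel-side change: each do_*() helper is the
standard lockless seqcount read loop (read_seqcount_begin() spins past odd,
in-progress sequence values; read_seqcount_retry() forces a reread if a
writer raced). Two further details: __vdso_gettimeofday() fills the timeval
through do_realtime() by casting it to timespec, which works because both
start with a seconds field followed by a long, then converts the nanoseconds
in place with tv_usec /= 1000; and clock IDs the vDSO cannot serve locally
fall back to the real syscall via the tile swint1 instruction, with
__NR_clock_gettime in r10 and the value/error pair returned in r0/r1. The
unprefixed gettimeofday and clock_gettime entry points are weak aliases,
matching the names exported by the version script above. A hedged sketch of
what a caller sees once the vDSO is mapped:

	/* Resolved through the vDSO, this normally completes without
	 * entering the kernel; unsupported clocks still work via the
	 * syscall fallback. */
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);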
diff --git a/arch/tile/kernel/vmlinux.lds.S b/arch/tile/kernel/vmlinux.lds.S
index f1819423ffc9..0e059a0101ea 100644
--- a/arch/tile/kernel/vmlinux.lds.S
+++ b/arch/tile/kernel/vmlinux.lds.S
@@ -66,11 +66,9 @@ SECTIONS
. = ALIGN(PAGE_SIZE);
__init_begin = .;
- VMLINUX_SYMBOL(_sinitdata) = .;
INIT_DATA_SECTION(16) :data =0
PERCPU_SECTION(L2_CACHE_BYTES)
. = ALIGN(PAGE_SIZE);
- VMLINUX_SYMBOL(_einitdata) = .;
__init_end = .;
_sdata = .; /* Start of data section */