Diffstat (limited to 'arch/s390/kernel/time.c')
-rw-r--r--	arch/s390/kernel/time.c	38
1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 06bcfa636638..326cb8f75f58 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -68,10 +68,10 @@ EXPORT_SYMBOL(s390_epoch_delta_notifier);
unsigned char ptff_function_mask[16];
-static unsigned long long lpar_offset;
-static unsigned long long initial_leap_seconds;
-static unsigned long long tod_steering_end;
-static long long tod_steering_delta;
+static unsigned long lpar_offset;
+static unsigned long initial_leap_seconds;
+static unsigned long tod_steering_end;
+static long tod_steering_delta;
/*
* Get time offsets with PTFF
@@ -80,10 +80,12 @@ void __init time_early_init(void)
{
struct ptff_qto qto;
struct ptff_qui qui;
+ int cs;
/* Initialize TOD steering parameters */
tod_steering_end = tod_clock_base.tod;
- vdso_data->arch_data.tod_steering_end = tod_steering_end;
+ for (cs = 0; cs < CS_BASES; cs++)
+ vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
if (!test_facility(28))
return;
@@ -96,7 +98,7 @@ void __init time_early_init(void)
/* get initial leap seconds */
if (ptff_query(PTFF_QUI) && ptff(&qui, sizeof(qui), PTFF_QUI) == 0)
- initial_leap_seconds = (unsigned long long)
+ initial_leap_seconds = (unsigned long)
((long) qui.old_leap * 4096000000L);
}
@@ -222,7 +224,7 @@ void __init read_persistent_wall_and_boot_offset(struct timespec64 *wall_time,
static u64 read_tod_clock(struct clocksource *cs)
{
- unsigned long long now, adj;
+ unsigned long now, adj;
preempt_disable(); /* protect from changes to steering parameters */
now = get_tod_clock();
@@ -362,10 +364,11 @@ static inline int check_sync_clock(void)
* Apply clock delta to the global data structures.
* This is called once on the CPU that performed the clock sync.
*/
-static void clock_sync_global(unsigned long long delta)
+static void clock_sync_global(unsigned long delta)
{
unsigned long now, adj;
struct ptff_qto qto;
+ int cs;
/* Fixup the monotonic sched clock. */
tod_clock_base.eitod += delta;
@@ -378,10 +381,13 @@ static void clock_sync_global(unsigned long long delta)
-(adj >> 15) : (adj >> 15);
tod_steering_delta += delta;
if ((abs(tod_steering_delta) >> 48) != 0)
- panic("TOD clock sync offset %lli is too large to drift\n",
+ panic("TOD clock sync offset %li is too large to drift\n",
tod_steering_delta);
tod_steering_end = now + (abs(tod_steering_delta) << 15);
- vdso_data->arch_data.tod_steering_end = tod_steering_end;
+ for (cs = 0; cs < CS_BASES; cs++) {
+ vdso_data[cs].arch_data.tod_steering_end = tod_steering_end;
+ vdso_data[cs].arch_data.tod_steering_delta = tod_steering_delta;
+ }
/* Update LPAR offset. */
if (ptff_query(PTFF_QTO) && ptff(&qto, sizeof(qto), PTFF_QTO) == 0)
@@ -394,7 +400,7 @@ static void clock_sync_global(unsigned long long delta)
* Apply clock delta to the per-CPU data structures of this CPU.
* This is called for each online CPU after the call to clock_sync_global.
*/
-static void clock_sync_local(unsigned long long delta)
+static void clock_sync_local(unsigned long delta)
{
/* Add the delta to the clock comparator. */
if (S390_lowcore.clock_comparator != clock_comparator_max) {
@@ -418,7 +424,7 @@ static void __init time_init_wq(void)
struct clock_sync_data {
atomic_t cpus;
int in_sync;
- unsigned long long clock_delta;
+ unsigned long clock_delta;
};
/*
@@ -538,7 +544,7 @@ static int stpinfo_valid(void)
static int stp_sync_clock(void *data)
{
struct clock_sync_data *sync = data;
- unsigned long long clock_delta, flags;
+ u64 clock_delta, flags;
static int first;
int rc;
@@ -720,8 +726,8 @@ static ssize_t ctn_id_show(struct device *dev,
mutex_lock(&stp_mutex);
if (stpinfo_valid())
- ret = sprintf(buf, "%016llx\n",
- *(unsigned long long *) stp_info.ctnid);
+ ret = sprintf(buf, "%016lx\n",
+ *(unsigned long *) stp_info.ctnid);
mutex_unlock(&stp_mutex);
return ret;
}
@@ -794,7 +800,7 @@ static ssize_t leap_seconds_scheduled_show(struct device *dev,
if (!stzi.lsoib.p)
return sprintf(buf, "0,0\n");
- return sprintf(buf, "%llu,%d\n",
+ return sprintf(buf, "%lu,%d\n",
tod_to_ns(stzi.lsoib.nlsout - TOD_UNIX_EPOCH) / NSEC_PER_SEC,
stzi.lsoib.nlso - stzi.lsoib.also);
}
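
Two aspects of the patch above lend themselves to a short illustration. First, the wholesale unsigned long long to unsigned long conversion is safe because s390 is 64-bit only, so both types are eight bytes wide. Second, with the generic vDSO the vdso_data object is an array with one element per clocksource base (CS_BASES), which is why the steering values are now mirrored into every slot instead of being written once. The sketch below is illustrative only and not part of the patch; the helper name tod_vdso_update_steering is made up, while vdso_data, arch_data and the steering fields are the objects already used in the hunks above.

#include <linux/build_bug.h>
#include <vdso/datapage.h>

/*
 * Hypothetical helper, not in the patch: it factors out the per-base
 * update pattern that time_early_init() and clock_sync_global()
 * open-code in the diff above.
 */
static inline void tod_vdso_update_steering(u64 end, s64 delta)
{
	int cs;

	/*
	 * The type conversion in this patch relies on 64-bit longs;
	 * on s390 both types below have the same width.
	 */
	BUILD_BUG_ON(sizeof(unsigned long) != sizeof(unsigned long long));

	/*
	 * Mirror the TOD steering parameters into every vDSO
	 * clocksource base so the vDSO sees consistent values
	 * regardless of which base it reads.
	 */
	for (cs = 0; cs < CS_BASES; cs++) {
		vdso_data[cs].arch_data.tod_steering_end = end;
		vdso_data[cs].arch_data.tod_steering_delta = delta;
	}
}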