kernel/time/timecompare.c

/*
 * Copyright (C) 2009 Intel Corporation.
 * Author: Patrick Ohly <patrick.ohly@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/timecompare.h>
#include <linux/module.h>
#include <linux/slab.h>		/* kmalloc()/kfree() */
#include <linux/math64.h>

/*
 * fixed point arithmetic scale factor for skew
 *
 * Usually one would measure skew in ppb (parts per billion, 1e9), but
 * using a factor of 2 simplifies the math.
 */
#define TIMECOMPARE_SKEW_RESOLUTION (((s64)1)<<30)
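
/*
 * Illustrative sketch, not part of the original file: converting a skew
 * value from this 2^30 fixed-point scale to parts per billion. The helper
 * name timecompare_skew_to_ppb() is hypothetical and only meant to make
 * the scale factor concrete, e.g. a skew of 1074 is roughly 1000 ppb (1 ppm).
 */
static inline s64 timecompare_skew_to_ppb(s64 skew)
{
	/* ppb = skew * 1e9 / 2^30; fits in 64 bits for any realistic skew */
	return div_s64(skew * 1000000000LL, TIMECOMPARE_SKEW_RESOLUTION);
}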

ktime_t timecompare_transform(struct timecompare *sync,
			      u64 source_tstamp)
{
	u64 nsec;

	/*
	 * Apply the measured offset, then extrapolate linearly with the
	 * measured skew over the time elapsed since the last update.
	 */
	nsec = source_tstamp + sync->offset;
	nsec += (s64)(source_tstamp - sync->last_update) * sync->skew /
		TIMECOMPARE_SKEW_RESOLUTION;

	return ns_to_ktime(nsec);
}
EXPORT_SYMBOL_GPL(timecompare_transform);
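
/*
 * Usage sketch (illustrative, not part of the original file): a driver
 * holding a struct timecompare that pairs a hardware timecounter with a
 * system time base could convert a raw cycle timestamp like this. The
 * helper name and the surrounding driver context are assumptions.
 */
static inline ktime_t example_hw_tstamp_to_sys(struct timecompare *sync,
					       u64 cycle_tstamp)
{
	/* raw counter cycles -> nanoseconds in the source time base */
	u64 nsec = timecounter_cyc2time(sync->source, cycle_tstamp);

	/* source nanoseconds -> target (e.g. system) time */
	return timecompare_transform(sync, nsec);
}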

int timecompare_offset(struct timecompare *sync,
		       s64 *offset,
		       u64 *source_tstamp)
{
	u64 start_source = 0, end_source = 0;
	struct {
		s64 offset;
		s64 duration_target;
	} buffer[10], sample, *samples;
	int counter = 0, i;
	int used;
	int index;
	int num_samples = sync->num_samples;

	if (num_samples > ARRAY_SIZE(buffer)) {
		samples = kmalloc(sizeof(*samples) * num_samples, GFP_ATOMIC);
		if (!samples) {
			samples = buffer;
			num_samples = ARRAY_SIZE(buffer);
		}
	} else {
		samples = buffer;
	}

	/* run until we have enough valid samples, but do not try forever */
	i = 0;
	counter = 0;
	while (1) {
		u64 ts;
		ktime_t start, end;

		start = sync->target();
		ts = timecounter_read(sync->source);
		end = sync->target();

		if (!i)
			start_source = ts;

		/* ignore negative durations */
		sample.duration_target = ktime_to_ns(ktime_sub(end, start));
		if (sample.duration_target >= 0) {
			/*
			 * assume symmetric delay to and from source:
			 * average target time corresponds to measured
			 * source time
			 */
			sample.offset =
				(ktime_to_ns(end) + ktime_to_ns(start)) / 2 -
				ts;

			/* simple insertion sort based on duration */
			index = counter - 1;
			while (index >= 0) {
				if (samples[index].duration_target <
				    sample.duration_target)
					break;
				samples[index + 1] = samples[index];
				index--;
			}
			samples[index + 1] = sample;
			counter++;
		}

		i++;
		if (counter >= num_samples || i >= 100000) {
			end_source = ts;
			break;
		}
	}

	*source_tstamp = (end_source + start_source) / 2;

	/* remove outliers by only using 75% of the samples */
	used = counter * 3 / 4;
	if (!used)
		used = counter;
	if (used) {
		/* calculate average */
		s64 off = 0;
		for (index = 0; index < used; index++)
			off += samples[index].offset;
		*offset = div_s64(off, used);
	}

	if (samples && samples != buffer)
		kfree(samples);

	return used;
}
EXPORT_SYMBOL_GPL(timecompare_offset);
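
/*
 * Setup sketch (illustrative, not part of the original file): populating a
 * struct timecompare and taking a one-shot offset measurement. The
 * timecounter "tc" and the target clock callback (e.g. ktime_get_real)
 * are assumed to be provided by the caller; the helper name is hypothetical.
 */
static inline int example_measure_offset(struct timecompare *sync,
					 struct timecounter *tc,
					 ktime_t (*target)(void))
{
	s64 offset;
	u64 source_tstamp;

	sync->source = tc;
	sync->target = target;
	sync->num_samples = 10;

	/* returns the number of samples used; 0 means the attempt failed */
	return timecompare_offset(sync, &offset, &source_tstamp);
}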

void __timecompare_update(struct timecompare *sync,
			  u64 source_tstamp)
{
	s64 offset;
	u64 average_time;

	if (!timecompare_offset(sync, &offset, &average_time))
		return;

	if (!sync->last_update) {
		sync->last_update = average_time;
		sync->offset = offset;
		sync->skew = 0;
	} else {
		s64 delta_nsec = average_time - sync->last_update;

		/* avoid division by negative or small deltas */
		if (delta_nsec >= 10000) {
			s64 delta_offset_nsec = offset - sync->offset;
			s64 skew; /* delta_offset_nsec *
				     TIMECOMPARE_SKEW_RESOLUTION /
				     delta_nsec */
			u64 divisor;

			/* div_s64() is limited to a 32 bit divisor */
			skew = delta_offset_nsec * TIMECOMPARE_SKEW_RESOLUTION;
			divisor = delta_nsec;
			while (unlikely(divisor >= ((s64)1) << 32)) {
				/* divide both by 2; beware, right shift of
				   a negative value is implementation-defined,
				   so it may only be applied to the positive
				   divisor */
				skew = div_s64(skew, 2);
				divisor >>= 1;
			}
			skew = div_s64(skew, divisor);

			/*
			 * Calculate new overall skew as 4/16 the
			 * old value and 12/16 the new one. This is
			 * a rather arbitrary tradeoff between
			 * only using the latest measurement (0/16 and
			 * 16/16) and even more weight on past measurements.
			 */
#define TIMECOMPARE_NEW_SKEW_PER_16 12
			sync->skew =
				div_s64((16 - TIMECOMPARE_NEW_SKEW_PER_16) *
					sync->skew +
					TIMECOMPARE_NEW_SKEW_PER_16 * skew,
					16);
			sync->last_update = average_time;
			sync->offset = offset;
		}
	}
}
EXPORT_SYMBOL_GPL(__timecompare_update);
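
/*
 * Periodic resync sketch (illustrative, not part of the original file):
 * a driver would typically call __timecompare_update() from time to time,
 * e.g. when taking hardware timestamps, so that offset and skew keep
 * tracking the current clock relationship. The helper name is hypothetical.
 */
static inline void example_resync(struct timecompare *sync)
{
	/* read the source clock in its own nanosecond time base ... */
	u64 source_tstamp = timecounter_read(sync->source);

	/* ... and refresh offset, skew and last_update from fresh samples */
	__timecompare_update(sync, source_tstamp);
}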