clocksource: Get rid of cycle_last
cycle_last was added to the clocksource to support the TSC validation. We moved that to the core code, so we can get rid of the extra copy.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: John Stultz <john.stultz@linaro.org>
parent 09ec54429c
commit 4a0e637738
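The change is mechanical: every reader of the duplicated snapshot now uses the copy kept in struct timekeeper, so nothing has to keep clock->cycle_last in sync anymore. Below is a minimal, stand-alone sketch of the resulting access pattern; the names tk_sketch and cycles_since_last are hypothetical and the code is plain user-space C, but the masked subtraction mirrors what clocksource_delta() does in the timekeeping.c hunks further down.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t cycle_t;

/* Illustrative stand-in for the relevant struct timekeeper fields. */
struct tk_sketch {
        cycle_t cycle_last;   /* single remaining copy of the last counter snapshot */
        cycle_t mask;         /* counter width mask of the current clocksource */
        uint32_t mult;        /* cycles -> shifted-nanoseconds multiplier */
        uint32_t shift;       /* shift applied after the multiplication */
};

/* Wrap-safe cycles elapsed since the last update, as clocksource_delta() computes it. */
static cycle_t cycles_since_last(const struct tk_sketch *tk, cycle_t now)
{
        return (now - tk->cycle_last) & tk->mask;
}

int main(void)
{
        struct tk_sketch tk = { .cycle_last = 1000, .mask = ~0ULL, .mult = 3, .shift = 1 };
        cycle_t now = 1500;

        /* delta = 500 cycles, nsec = (500 * 3) >> 1 = 750 */
        uint64_t nsec = (cycles_since_last(&tk, now) * tk.mult) >> tk.shift;

        printf("%llu ns since the last update\n", (unsigned long long)nsec);
        return 0;
}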
@@ -224,7 +224,7 @@ void update_vsyscall(struct timekeeper *tk)
 	vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
 
 	if (!use_syscall) {
-		vdso_data->cs_cycle_last = tk->clock->cycle_last;
+		vdso_data->cs_cycle_last = tk->cycle_last;
 		vdso_data->xtime_clock_sec = tk->xtime_sec;
 		vdso_data->xtime_clock_nsec = tk->xtime_nsec;
 		vdso_data->cs_mult = tk->mult;
@@ -441,7 +441,7 @@ void update_vsyscall_tz(void)
 }
 
 void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
-			 struct clocksource *c, u32 mult)
+			 struct clocksource *c, u32 mult, cycles_t cycle_last)
 {
 	write_seqcount_begin(&fsyscall_gtod_data.seq);
 
@@ -450,7 +450,7 @@ void update_vsyscall_old(struct timespec *wall, struct timespec *wtm,
 	fsyscall_gtod_data.clk_mult = mult;
 	fsyscall_gtod_data.clk_shift = c->shift;
 	fsyscall_gtod_data.clk_fsys_mmio = c->archdata.fsys_mmio;
-	fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
+	fsyscall_gtod_data.clk_cycle_last = cycle_last;
 
 	/* copy kernel time structures */
 	fsyscall_gtod_data.wall_time.tv_sec = wall->tv_sec;
@@ -741,7 +741,7 @@ static cycle_t timebase_read(struct clocksource *cs)
 }
 
 void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
-			 struct clocksource *clock, u32 mult)
+			 struct clocksource *clock, u32 mult, cycle_t cycle_last)
 {
 	u64 new_tb_to_xs, new_stamp_xsec;
 	u32 frac_sec;
@@ -774,7 +774,7 @@ void update_vsyscall_old(struct timespec *wall_time, struct timespec *wtm,
 	 * We expect the caller to have done the first increment of
 	 * vdso_data->tb_update_count already.
 	 */
-	vdso_data->tb_orig_stamp = clock->cycle_last;
+	vdso_data->tb_orig_stamp = cycle_last;
 	vdso_data->stamp_xsec = new_stamp_xsec;
 	vdso_data->tb_to_xs = new_tb_to_xs;
 	vdso_data->wtom_clock_sec = wtm->tv_sec;
@@ -220,7 +220,7 @@ void update_vsyscall(struct timekeeper *tk)
 	/* Make userspace gettimeofday spin until we're done. */
 	++vdso_data->tb_update_count;
 	smp_wmb();
-	vdso_data->xtime_tod_stamp = tk->clock->cycle_last;
+	vdso_data->xtime_tod_stamp = tk->cycle_last;
 	vdso_data->xtime_clock_sec = tk->xtime_sec;
 	vdso_data->xtime_clock_nsec = tk->xtime_nsec;
 	vdso_data->wtom_clock_sec =
@@ -269,7 +269,7 @@ void update_vsyscall(struct timekeeper *tk)
 	/* Userspace gettimeofday will spin while this value is odd. */
 	++vdso_data->tb_update_count;
 	smp_wmb();
-	vdso_data->xtime_tod_stamp = clock->cycle_last;
+	vdso_data->xtime_tod_stamp = tk->cycle_last;
 	vdso_data->xtime_clock_sec = tk->xtime_sec;
 	vdso_data->xtime_clock_nsec = tk->xtime_nsec;
 	vdso_data->wtom_clock_sec = wtm->tv_sec;
@@ -32,7 +32,7 @@ void update_vsyscall(struct timekeeper *tk)
 
 	/* copy vsyscall data */
 	vdata->vclock_mode = tk->clock->archdata.vclock_mode;
-	vdata->cycle_last = tk->clock->cycle_last;
+	vdata->cycle_last = tk->cycle_last;
 	vdata->mask = tk->clock->mask;
 	vdata->mult = tk->mult;
 	vdata->shift = tk->shift;
@@ -1001,7 +1001,7 @@ static void update_pvclock_gtod(struct timekeeper *tk)
 
 	/* copy pvclock gtod data */
 	vdata->clock.vclock_mode = tk->clock->archdata.vclock_mode;
-	vdata->clock.cycle_last = tk->clock->cycle_last;
+	vdata->clock.cycle_last = tk->cycle_last;
 	vdata->clock.mask = tk->clock->mask;
 	vdata->clock.mult = tk->mult;
 	vdata->clock.shift = tk->shift;
@@ -162,7 +162,6 @@ extern u64 timecounter_cyc2time(struct timecounter *tc,
  * @archdata: arch-specific data
  * @suspend: suspend function for the clocksource, if necessary
  * @resume: resume function for the clocksource, if necessary
- * @cycle_last: most recent cycle counter value seen by ::read()
  * @owner: module reference, must be set by clocksource in modules
  */
 struct clocksource {
@@ -171,7 +170,6 @@ struct clocksource {
 	 * clocksource itself is cacheline aligned.
 	 */
 	cycle_t (*read)(struct clocksource *cs);
-	cycle_t cycle_last;
 	cycle_t mask;
 	u32 mult;
 	u32 shift;
@@ -29,6 +29,8 @@
 struct timekeeper {
 	/* Current clocksource used for timekeeping. */
 	struct clocksource *clock;
+	/* Last cycle value */
+	cycle_t cycle_last;
 	/* NTP adjusted clock multiplier */
 	u32 mult;
 	/* The shift value of the current clocksource. */
@@ -62,8 +64,6 @@ struct timekeeper {
 
 	/* Number of clock cycles in one NTP interval. */
 	cycle_t cycle_interval;
-	/* Last cycle value (also stored in clock->cycle_last) */
-	cycle_t cycle_last;
 	/* Number of clock shifted nano seconds in one NTP interval. */
 	u64 xtime_interval;
 	/* shifted nano seconds left over when rounding cycle_interval */
@@ -91,7 +91,8 @@ extern void update_vsyscall_tz(void);
 #elif defined(CONFIG_GENERIC_TIME_VSYSCALL_OLD)
 
 extern void update_vsyscall_old(struct timespec *ts, struct timespec *wtm,
-				struct clocksource *c, u32 mult);
+				struct clocksource *c, u32 mult,
+				cycles_t cycle_last);
 extern void update_vsyscall_tz(void);
 
 #else
@@ -121,7 +121,7 @@ static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
 
 	old_clock = tk->clock;
 	tk->clock = clock;
-	tk->cycle_last = clock->cycle_last = clock->read(clock);
+	tk->cycle_last = clock->read(clock);
 
 	/* Do the ns -> cycle conversion first, using original mult */
 	tmp = NTP_INTERVAL_LENGTH;
@@ -182,7 +182,7 @@ static inline s64 timekeeping_get_ns(struct timekeeper *tk)
 	cycle_now = clock->read(clock);
 
 	/* calculate the delta since the last update_wall_time: */
-	delta = clocksource_delta(cycle_now, clock->cycle_last, clock->mask);
+	delta = clocksource_delta(cycle_now, tk->cycle_last, clock->mask);
 
 	nsec = delta * tk->mult + tk->xtime_nsec;
 	nsec >>= tk->shift;
@@ -202,7 +202,7 @@ static inline s64 timekeeping_get_ns_raw(struct timekeeper *tk)
 	cycle_now = clock->read(clock);
 
 	/* calculate the delta since the last update_wall_time: */
-	delta = clocksource_delta(cycle_now, clock->cycle_last, clock->mask);
+	delta = clocksource_delta(cycle_now, tk->cycle_last, clock->mask);
 
 	/* convert delta to nanoseconds. */
 	nsec = clocksource_cyc2ns(delta, clock->mult, clock->shift);
@@ -218,7 +218,8 @@ static inline void update_vsyscall(struct timekeeper *tk)
 	struct timespec xt;
 
 	xt = tk_xtime(tk);
-	update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult);
+	update_vsyscall_old(&xt, &tk->wall_to_monotonic, tk->clock, tk->mult,
+			    tk->cycle_last);
 }
 
 static inline void old_vsyscall_fixup(struct timekeeper *tk)
@@ -342,8 +343,8 @@ static void timekeeping_forward_now(struct timekeeper *tk)
 
 	clock = tk->clock;
 	cycle_now = clock->read(clock);
-	delta = clocksource_delta(cycle_now, clock->cycle_last, clock->mask);
-	tk->cycle_last = clock->cycle_last = cycle_now;
+	delta = clocksource_delta(cycle_now, tk->cycle_last, clock->mask);
+	tk->cycle_last = cycle_now;
 
 	tk->xtime_nsec += delta * tk->mult;
 
@@ -1020,13 +1021,13 @@ static void timekeeping_resume(void)
 	 */
 	cycle_now = clock->read(clock);
 	if ((clock->flags & CLOCK_SOURCE_SUSPEND_NONSTOP) &&
-	    cycle_now > clock->cycle_last) {
+	    cycle_now > tk->cycle_last) {
 		u64 num, max = ULLONG_MAX;
 		u32 mult = clock->mult;
 		u32 shift = clock->shift;
 		s64 nsec = 0;
 
-		cycle_delta = clocksource_delta(cycle_now, clock->cycle_last,
+		cycle_delta = clocksource_delta(cycle_now, tk->cycle_last,
 						clock->mask);
 
 		/*
@@ -1053,7 +1054,7 @@ static void timekeeping_resume(void)
 	__timekeeping_inject_sleeptime(tk, &ts_delta);
 
 	/* Re-base the last cycle value */
-	tk->cycle_last = clock->cycle_last = cycle_now;
+	tk->cycle_last = cycle_now;
 	tk->ntp_error = 0;
 	timekeeping_suspended = 0;
 	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
@@ -1433,7 +1434,7 @@ void update_wall_time(void)
 #ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
 	offset = real_tk->cycle_interval;
 #else
-	offset = clocksource_delta(clock->read(clock), clock->cycle_last,
+	offset = clocksource_delta(clock->read(clock), tk->cycle_last,
 				   clock->mask);
 #endif
 
@@ -1477,8 +1478,6 @@ void update_wall_time(void)
 	clock_set |= accumulate_nsecs_to_secs(tk);
 
 	write_seqcount_begin(&tk_core.seq);
-	/* Update clock->cycle_last with the new value */
-	clock->cycle_last = tk->cycle_last;
 	/*
 	 * Update the real timekeeper.
 	 *