Commit c8f9eb0d authored by Peter Zijlstra, committed by Will Deacon

arm64: perf: Add cap_user_time_short

This completes the ARM64 cap_user_time support: with cap_user_time_short set, the user page also publishes time_cycles and time_mask, so userspace can correctly extend a counter narrower than 64 bits before converting it to nanoseconds.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Leo Yan <leo.yan@linaro.org>
Link: https://lore.kernel.org/r/20200716051130.4359-7-leo.yan@linaro.org
Signed-off-by: Will Deacon <will@kernel.org>
parent 6c0246a4
arch/arm64/kernel/perf_event.c  +7 −5
@@ -1177,6 +1177,7 @@ void arch_perf_update_userpage(struct perf_event *event,
 
 	userpg->cap_user_time = 0;
 	userpg->cap_user_time_zero = 0;
+	userpg->cap_user_time_short = 0;
 
 	do {
 		rd = sched_clock_read_begin(&seq);
@@ -1187,13 +1188,13 @@ void arch_perf_update_userpage(struct perf_event *event,
 		userpg->time_mult = rd->mult;
 		userpg->time_shift = rd->shift;
 		userpg->time_zero = rd->epoch_ns;
+		userpg->time_cycles = rd->epoch_cyc;
+		userpg->time_mask = rd->sched_clock_mask;
 
 		/*
-		 * This isn't strictly correct, the ARM64 counter can be
-		 * 'short' and then we get funnies when it wraps. The correct
-		 * thing would be to extend the perf ABI with a cycle and mask
-		 * value, but because wrapping on ARM64 is very rare in
-		 * practise this 'works'.
+		 * Subtract the cycle base, such that software that
+		 * doesn't know about cap_user_time_short still 'works'
+		 * assuming no wraps.
 		 */
 		ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
 		userpg->time_zero -= ns;
@@ -1219,4 +1220,5 @@ void arch_perf_update_userpage(struct perf_event *event,
 	 */
 	userpg->cap_user_time = 1;
 	userpg->cap_user_time_zero = 1;
+	userpg->cap_user_time_short = 1;
 }
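
For reference, here is a minimal userspace sketch of how a reader would consume the new fields; it is not part of this commit. The cycles-to-nanoseconds conversion follows the algorithm documented in include/uapi/linux/perf_event.h, the counter read assumes arm64's CNTVCT_EL0, and the helper names are illustrative only:

#include <stdint.h>
#include <linux/perf_event.h>

/* Read the arm64 virtual counter described by the userpage time fields. */
static inline uint64_t read_cntvct(void)
{
	uint64_t cyc;

	asm volatile("isb\n\tmrs %0, cntvct_el0" : "=r" (cyc));
	return cyc;
}

/* Convert a raw counter value to nanoseconds using the userpage fields. */
static uint64_t userpage_cyc_to_ns(const volatile struct perf_event_mmap_page *pc,
				   uint64_t cyc)
{
	uint64_t quot, rem;

	/*
	 * cap_user_time_short: the counter is narrower than 64 bits, so
	 * extend it around the sched_clock epoch first:
	 *   cyc = time_cycles + ((cyc - time_cycles) & time_mask)
	 */
	if (pc->cap_user_time_short)
		cyc = pc->time_cycles +
		      ((cyc - pc->time_cycles) & pc->time_mask);

	/* time_zero + (cyc * time_mult) >> time_shift, avoiding overflow. */
	quot = cyc >> pc->time_shift;
	rem  = cyc & (((uint64_t)1 << pc->time_shift) - 1);
	return pc->time_zero + quot * pc->time_mult +
	       ((rem * pc->time_mult) >> pc->time_shift);
}

/* Sample the userpage fields under its seqlock, as the ABI requires. */
static uint64_t userpage_time_ns(const volatile struct perf_event_mmap_page *pc)
{
	uint64_t ns;
	uint32_t seq;

	do {
		seq = pc->lock;
		__asm__ __volatile__("" ::: "memory");	/* read barrier */
		ns = userpage_cyc_to_ns(pc, read_cntvct());
		__asm__ __volatile__("" ::: "memory");	/* read barrier */
	} while (pc->lock != seq);

	return ns;
}

Note how the second hunk subtracts mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift) from time_zero: a reader that ignores cap_user_time_short and feeds the raw counter through the same mult/shift arithmetic still computes the correct time, provided the counter has not wrapped since the epoch.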