Commit 57103eb7 authored by Linus Torvalds
Browse files

Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull perf fixes from Ingo Molnar:
 "Various fixes, most of them related to bugs perf fuzzing found in the
  x86 code"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf/x86/regs: Use PERF_REG_EXTENDED_MASK
  perf/x86: Remove pmu->pebs_no_xmm_regs
  perf/x86: Clean up PEBS_XMM_REGS
  perf/x86/regs: Check reserved bits
  perf/x86: Disable extended registers for non-supported PMUs
  perf/ioctl: Add check for the sample_period value
  perf/core: Fix perf_sample_regs_user() mm check
parents eed7d30e 8b12b812
Loading
Loading
Loading
Loading
+3 −3
Original line number | Diff line number | Diff line
@@ -561,14 +561,14 @@ int x86_pmu_hw_config(struct perf_event *event)
	}

	/* sample_regs_user never support XMM registers */
	if (unlikely(event->attr.sample_regs_user & PEBS_XMM_REGS))
	if (unlikely(event->attr.sample_regs_user & PERF_REG_EXTENDED_MASK))
		return -EINVAL;
	/*
	 * Besides the general purpose registers, XMM registers may
	 * be collected in PEBS on some platforms, e.g. Icelake
	 */
	if (unlikely(event->attr.sample_regs_intr & PEBS_XMM_REGS)) {
		if (x86_pmu.pebs_no_xmm_regs)
	if (unlikely(event->attr.sample_regs_intr & PERF_REG_EXTENDED_MASK)) {
		if (!(event->pmu->capabilities & PERF_PMU_CAP_EXTENDED_REGS))
			return -EINVAL;

		if (!event->attr.precise_ip)
+4 −5
Original line number | Diff line number | Diff line
@@ -987,7 +987,7 @@ static u64 pebs_update_adaptive_cfg(struct perf_event *event)
		pebs_data_cfg |= PEBS_DATACFG_GP;

	if ((sample_type & PERF_SAMPLE_REGS_INTR) &&
	    (attr->sample_regs_intr & PEBS_XMM_REGS))
	    (attr->sample_regs_intr & PERF_REG_EXTENDED_MASK))
		pebs_data_cfg |= PEBS_DATACFG_XMMS;

	if (sample_type & PERF_SAMPLE_BRANCH_STACK) {
@@ -1964,10 +1964,9 @@ void __init intel_ds_init(void)
	x86_pmu.bts  = boot_cpu_has(X86_FEATURE_BTS);
	x86_pmu.pebs = boot_cpu_has(X86_FEATURE_PEBS);
	x86_pmu.pebs_buffer_size = PEBS_BUFFER_SIZE;
	if (x86_pmu.version <= 4) {
	if (x86_pmu.version <= 4)
		x86_pmu.pebs_no_isolation = 1;
		x86_pmu.pebs_no_xmm_regs = 1;
	}

	if (x86_pmu.pebs) {
		char pebs_type = x86_pmu.intel_cap.pebs_trap ?  '+' : '-';
		char *pebs_qual = "";
@@ -2020,9 +2019,9 @@ void __init intel_ds_init(void)
					PERF_SAMPLE_TIME;
				x86_pmu.flags |= PMU_FL_PEBS_ALL;
				pebs_qual = "-baseline";
				x86_get_pmu()->capabilities |= PERF_PMU_CAP_EXTENDED_REGS;
			} else {
				/* Only basic record supported */
				x86_pmu.pebs_no_xmm_regs = 1;
				x86_pmu.large_pebs_flags &=
					~(PERF_SAMPLE_ADDR |
					  PERF_SAMPLE_TIME |
+1 −20
Original line number | Diff line number | Diff line
@@ -121,24 +121,6 @@ struct amd_nb {
	 (1ULL << PERF_REG_X86_R14)   | \
	 (1ULL << PERF_REG_X86_R15))

#define PEBS_XMM_REGS                   \
	((1ULL << PERF_REG_X86_XMM0)  | \
	 (1ULL << PERF_REG_X86_XMM1)  | \
	 (1ULL << PERF_REG_X86_XMM2)  | \
	 (1ULL << PERF_REG_X86_XMM3)  | \
	 (1ULL << PERF_REG_X86_XMM4)  | \
	 (1ULL << PERF_REG_X86_XMM5)  | \
	 (1ULL << PERF_REG_X86_XMM6)  | \
	 (1ULL << PERF_REG_X86_XMM7)  | \
	 (1ULL << PERF_REG_X86_XMM8)  | \
	 (1ULL << PERF_REG_X86_XMM9)  | \
	 (1ULL << PERF_REG_X86_XMM10) | \
	 (1ULL << PERF_REG_X86_XMM11) | \
	 (1ULL << PERF_REG_X86_XMM12) | \
	 (1ULL << PERF_REG_X86_XMM13) | \
	 (1ULL << PERF_REG_X86_XMM14) | \
	 (1ULL << PERF_REG_X86_XMM15))

/*
 * Per register state.
 */
@@ -668,8 +650,7 @@ struct x86_pmu {
			pebs_broken		:1,
			pebs_prec_dist		:1,
			pebs_no_tlb		:1,
			pebs_no_isolation	:1,
			pebs_no_xmm_regs	:1;
			pebs_no_isolation	:1;
	int		pebs_record_size;
	int		pebs_buffer_size;
	int		max_pebs_events;
+3 −0
Original line number | Diff line number | Diff line
@@ -52,4 +52,7 @@ enum perf_event_x86_regs {
	/* These include both GPRs and XMMX registers */
	PERF_REG_X86_XMM_MAX = PERF_REG_X86_XMM15 + 2,
};

#define PERF_REG_EXTENDED_MASK	(~((1ULL << PERF_REG_X86_XMM0) - 1))

#endif /* _ASM_X86_PERF_REGS_H */
+5 −2
Original line number | Diff line number | Diff line
@@ -74,6 +74,9 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)
	return regs_get_register(regs, pt_regs_offset[idx]);
}

#define PERF_REG_X86_RESERVED	(((1ULL << PERF_REG_X86_XMM0) - 1) & \
				 ~((1ULL << PERF_REG_X86_MAX) - 1))

#ifdef CONFIG_X86_32
#define REG_NOSUPPORT ((1ULL << PERF_REG_X86_R8) | \
		       (1ULL << PERF_REG_X86_R9) | \
@@ -86,7 +89,7 @@ u64 perf_reg_value(struct pt_regs *regs, int idx)

int perf_reg_validate(u64 mask)
{
	if (!mask || (mask & REG_NOSUPPORT))
	if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
		return -EINVAL;

	return 0;
@@ -112,7 +115,7 @@ void perf_get_regs_user(struct perf_regs *regs_user,

int perf_reg_validate(u64 mask)
{
	if (!mask || (mask & REG_NOSUPPORT))
	if (!mask || (mask & (REG_NOSUPPORT | PERF_REG_X86_RESERVED)))
		return -EINVAL;

	return 0;
Loading