Commit a82e4ef0 authored by Will Deacon's avatar Will Deacon
Browse files

Merge branch 'for-next/late-arrivals' into for-next/core

Late patches for 5.10: MTE selftests, minor KCSAN preparation and removal
of some unused prototypes.

(Amit Daniel Kachhap and others)
* for-next/late-arrivals:
  arm64: random: Remove no longer needed prototypes
  arm64: initialize per-cpu offsets earlier
  kselftest/arm64: Check mte tagged user address in kernel
  kselftest/arm64: Verify KSM page merge for MTE pages
  kselftest/arm64: Verify all different mmap MTE options
  kselftest/arm64: Check forked child mte memory accessibility
  kselftest/arm64: Verify mte tag inclusion via prctl
  kselftest/arm64: Add utilities and a test to validate mte memory
parents baab8532 d433ab42
Loading
Loading
Loading
Loading
+0 −5
Original line number Diff line number Diff line
@@ -79,10 +79,5 @@ arch_get_random_seed_long_early(unsigned long *v)
}
#define arch_get_random_seed_long_early arch_get_random_seed_long_early

#else

static inline bool __arm64_rndr(unsigned long *v) { return false; }
static inline bool __init __early_cpu_has_rndr(void) { return false; }

#endif /* CONFIG_ARCH_RANDOM */
#endif /* _ASM_ARCHRANDOM_H */
+2 −0
Original line number Diff line number Diff line
@@ -68,4 +68,6 @@ void __init init_cpu_features(struct cpuinfo_arm64 *info);
void update_cpu_features(int cpu, struct cpuinfo_arm64 *info,
				 struct cpuinfo_arm64 *boot);

void init_this_cpu_offset(void);

#endif /* __ASM_CPU_H */
+3 −0
Original line number Diff line number Diff line
@@ -448,6 +448,8 @@ SYM_FUNC_START_LOCAL(__primary_switched)
	bl	__pi_memset
	dsb	ishst				// Make zero page visible to PTW

	bl	init_this_cpu_offset

#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
@@ -754,6 +756,7 @@ SYM_FUNC_START_LOCAL(__secondary_switched)
	ptrauth_keys_init_cpu x2, x3, x4, x5
#endif

	bl	init_this_cpu_offset
	b	secondary_start_kernel
SYM_FUNC_END(__secondary_switched)

+6 −6
Original line number Diff line number Diff line
@@ -87,12 +87,6 @@ void __init smp_setup_processor_id(void)
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	set_cpu_logical_map(0, mpidr);

	/*
	 * clear __my_cpu_offset on boot CPU to avoid hang caused by
	 * using percpu variable early, for example, lockdep will
	 * access percpu variable inside lock_release
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
		(unsigned long)mpidr, read_cpuid_id());
}
@@ -282,6 +276,12 @@ u64 cpu_logical_map(int cpu)
}
EXPORT_SYMBOL_GPL(cpu_logical_map);

/*
 * Set this CPU's per-cpu base offset to the per-cpu area of the CPU the
 * current task is running on (task_cpu(current) -> per_cpu_offset()).
 * Marked noinstr: the body must not be instrumented — per the commit
 * message this runs early (called from head.S before per-cpu state is
 * otherwise usable), so no tracing/KCSAN hooks may fire here.
 */
void noinstr init_this_cpu_offset(void)
{
	unsigned int cpu = task_cpu(current);
	set_my_cpu_offset(per_cpu_offset(cpu));
}

void __init __no_sanitize_address setup_arch(char **cmdline_p)
{
	init_mm.start_code = (unsigned long) _text;
+8 −5
Original line number Diff line number Diff line
@@ -192,10 +192,7 @@ asmlinkage notrace void secondary_start_kernel(void)
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	struct mm_struct *mm = &init_mm;
	const struct cpu_operations *ops;
	unsigned int cpu;

	cpu = task_cpu(current);
	set_my_cpu_offset(per_cpu_offset(cpu));
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
@@ -435,7 +432,13 @@ void __init smp_cpus_done(unsigned int max_cpus)

void __init smp_prepare_boot_cpu(void)
{
	set_my_cpu_offset(per_cpu_offset(smp_processor_id()));
	/*
	 * Now that setup_per_cpu_areas() has allocated the runtime per-cpu
	 * areas it is only safe to read the CPU0 boot-time area, and we must
	 * reinitialize the offset to point to the runtime area.
	 */
	init_this_cpu_offset();

	cpuinfo_store_boot_cpu();

	/*
Loading