Commit a39060b0 authored by Will Deacon

arm64: compat: Allow 32-bit vdso and sigpage to co-exist



In preparation for removing the signal trampoline from the compat vDSO,
allow the sigpage and the compat vDSO to co-exist.

For the moment the vDSO signal trampoline will still be used when built.
Subsequent patches will move to the sigpage consistently.

Acked-by: Dave Martin <Dave.Martin@arm.com>
Reviewed-by: Vincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: Ard Biesheuvel <ardb@kernel.org>
Reviewed-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent 87676cfc
+3 −0
Original line number Diff line number Diff line
@@ -19,6 +19,9 @@

/*
 * Per-mm architecture context.
 * NOTE(review): diff context only — presumably arm64's mm_context_t from
 * asm/mmu.h; confirm against the full header.
 */
typedef struct {
	atomic64_t	id;		/* presumably the ASID — TODO confirm */
#ifdef CONFIG_COMPAT
	/*
	 * Base of the compat (AArch32) signal page: user mapping holding the
	 * sigreturn trampoline code, set by aarch32_sigreturn_setup() and
	 * used as the retcode base in compat_setup_return(). Kept separate
	 * from 'vdso' so the compat vDSO and sigpage can co-exist.
	 */
	void		*sigpage;
#endif
	void		*vdso;		/* base address of the vDSO user mapping */
	unsigned long	flags;
} mm_context_t;
+0 −2
Original line number Diff line number Diff line
@@ -29,9 +29,7 @@ $(obj)/%.stub.o: $(obj)/%.o FORCE

obj-$(CONFIG_COMPAT)			+= sys32.o signal32.o			\
					   sys_compat.o
ifneq ($(CONFIG_COMPAT_VDSO), y)
obj-$(CONFIG_COMPAT)			+= sigreturn32.o
endif
obj-$(CONFIG_KUSER_HELPERS)		+= kuser32.o
obj-$(CONFIG_FUNCTION_TRACER)		+= ftrace.o entry-ftrace.o
obj-$(CONFIG_MODULES)			+= module.o
+1 −1
Original line number Diff line number Diff line
@@ -371,7 +371,7 @@ static void compat_setup_return(struct pt_regs *regs, struct k_sigaction *ka,
		if (ka->sa.sa_flags & SA_SIGINFO)
			idx += 3;

		retcode = (unsigned long)current->mm->context.vdso +
		retcode = (unsigned long)current->mm->context.sigpage +
			  (idx << 2) + thumb;
#endif
	}
+28 −33
Original line number Diff line number Diff line
@@ -191,15 +191,12 @@ enum aarch32_map {
#ifdef CONFIG_COMPAT_VDSO
	AA32_MAP_VVAR,
	AA32_MAP_VDSO,
#else
	AA32_MAP_SIGPAGE
#endif
	AA32_MAP_SIGPAGE
};

static struct page *aarch32_vectors_page __ro_after_init;
#ifndef CONFIG_COMPAT_VDSO
static struct page *aarch32_sig_page __ro_after_init;
#endif

static struct vm_special_mapping aarch32_vdso_maps[] = {
	[AA32_MAP_VECTORS] = {
@@ -214,12 +211,11 @@ static struct vm_special_mapping aarch32_vdso_maps[] = {
		.name = "[vdso]",
		.mremap = aarch32_vdso_mremap,
	},
#else
#endif /* CONFIG_COMPAT_VDSO */
	[AA32_MAP_SIGPAGE] = {
		.name	= "[sigpage]", /* ABI */
		.pages	= &aarch32_sig_page,
	},
#endif /* CONFIG_COMPAT_VDSO */
};

static int aarch32_alloc_kuser_vdso_page(void)
@@ -242,27 +238,11 @@ static int aarch32_alloc_kuser_vdso_page(void)
	return 0;
}

#ifdef CONFIG_COMPAT_VDSO
static int __aarch32_alloc_vdso_pages(void)
{
	int ret;

	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	ret = __vdso_init(VDSO_ABI_AA32);
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
#else
static int __aarch32_alloc_vdso_pages(void)
static int aarch32_alloc_sigpage(void)
{
	extern char __aarch32_sigret_code_start[], __aarch32_sigret_code_end[];
	int sigret_sz = __aarch32_sigret_code_end - __aarch32_sigret_code_start;
	unsigned long sigpage;
	int ret;

	sigpage = get_zeroed_page(GFP_ATOMIC);
	if (!sigpage)
@@ -271,18 +251,34 @@ static int __aarch32_alloc_vdso_pages(void)
	memcpy((void *)sigpage, __aarch32_sigret_code_start, sigret_sz);
	aarch32_sig_page = virt_to_page(sigpage);
	flush_dcache_page(aarch32_sig_page);
	return 0;
}

	ret = aarch32_alloc_kuser_vdso_page();
	if (ret)
		free_page(sigpage);
#ifdef CONFIG_COMPAT_VDSO
static int __aarch32_alloc_vdso_pages(void)
{
	vdso_info[VDSO_ABI_AA32].dm = &aarch32_vdso_maps[AA32_MAP_VVAR];
	vdso_info[VDSO_ABI_AA32].cm = &aarch32_vdso_maps[AA32_MAP_VDSO];

	return ret;
	return __vdso_init(VDSO_ABI_AA32);
}
#endif /* CONFIG_COMPAT_VDSO */

static int __init aarch32_alloc_vdso_pages(void)
{
	return __aarch32_alloc_vdso_pages();
	int ret;

#ifdef CONFIG_COMPAT_VDSO
	ret = __aarch32_alloc_vdso_pages();
	if (ret)
		return ret;
#endif

	ret = aarch32_alloc_sigpage();
	if (ret)
		return ret;

	return aarch32_alloc_kuser_vdso_page();
}
arch_initcall(aarch32_alloc_vdso_pages);

@@ -305,7 +301,6 @@ static int aarch32_kuser_helpers_setup(struct mm_struct *mm)
	return PTR_ERR_OR_ZERO(ret);
}

#ifndef CONFIG_COMPAT_VDSO
static int aarch32_sigreturn_setup(struct mm_struct *mm)
{
	unsigned long addr;
@@ -328,12 +323,11 @@ static int aarch32_sigreturn_setup(struct mm_struct *mm)
	if (IS_ERR(ret))
		goto out;

	mm->context.vdso = (void *)addr;
	mm->context.sigpage = (void *)addr;

out:
	return PTR_ERR_OR_ZERO(ret);
}
#endif /* !CONFIG_COMPAT_VDSO */

int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
@@ -352,10 +346,11 @@ int aarch32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
				       mm,
				       bprm,
				       uses_interp);
#else
	ret = aarch32_sigreturn_setup(mm);
	if (ret)
		goto out;
#endif /* CONFIG_COMPAT_VDSO */

	ret = aarch32_sigreturn_setup(mm);
out:
	mmap_write_unlock(mm);
	return ret;