Commit 2b835e24 authored by Andrey Konovalov's avatar Andrey Konovalov Committed by Will Deacon
Browse files

arm64: untag user pointers in access_ok and __uaccess_mask_ptr



This patch is a part of a series that extends kernel ABI to allow to pass
tagged user pointers (with the top byte set to something other than
0x00) as syscall arguments.

copy_from_user (and a few other similar functions) are used to copy data
from user memory into the kernel memory or vice versa. Since a user can
provide a tagged pointer to one of the syscalls that use copy_from_user,
we need to correctly handle such pointers.

Do this by untagging user pointers in access_ok and in __uaccess_mask_ptr,
before performing access validity checks.

Note that this patch only temporarily untags the pointers to perform the
checks, but then passes them as is into the kernel internals.

Reviewed-by: default avatarVincenzo Frascino <vincenzo.frascino@arm.com>
Reviewed-by: default avatarKees Cook <keescook@chromium.org>
Reviewed-by: default avatarCatalin Marinas <catalin.marinas@arm.com>
Signed-off-by: default avatarAndrey Konovalov <andreyknvl@google.com>
[will: Add __force to casting in untagged_addr() to kill sparse warning]
Signed-off-by: default avatarWill Deacon <will@kernel.org>
parent e21a712a
Loading
Loading
Loading
Loading
+1 −1
Original line number Original line Diff line number Diff line
@@ -201,7 +201,7 @@ extern u64 vabits_user;
 * pass on to access_ok(), for instance.
 * pass on to access_ok(), for instance.
 */
 */
#define untagged_addr(addr)	\
#define untagged_addr(addr)	\
	((__typeof__(addr))sign_extend64((u64)(addr), 55))
	((__typeof__(addr))sign_extend64((__force u64)(addr), 55))


#ifdef CONFIG_KASAN_SW_TAGS
#ifdef CONFIG_KASAN_SW_TAGS
#define __tag_shifted(tag)	((u64)(tag) << 56)
#define __tag_shifted(tag)	((u64)(tag) << 56)
+7 −3
Original line number Original line Diff line number Diff line
@@ -62,6 +62,8 @@ static inline unsigned long __range_ok(const void __user *addr, unsigned long si
{
{
	unsigned long ret, limit = current_thread_info()->addr_limit;
	unsigned long ret, limit = current_thread_info()->addr_limit;


	addr = untagged_addr(addr);

	__chk_user_ptr(addr);
	__chk_user_ptr(addr);
	asm volatile(
	asm volatile(
	// A + B <= C + 1 for all A,B,C, in four easy steps:
	// A + B <= C + 1 for all A,B,C, in four easy steps:
@@ -215,7 +217,8 @@ static inline void uaccess_enable_not_uao(void)


/*
/*
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * Sanitise a uaccess pointer such that it becomes NULL if above the
 * current addr_limit.
 * current addr_limit. In case the pointer is tagged (has the top byte set),
 * untag the pointer before checking.
 */
 */
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
#define uaccess_mask_ptr(ptr) (__typeof__(ptr))__uaccess_mask_ptr(ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
@@ -223,10 +226,11 @@ static inline void __user *__uaccess_mask_ptr(const void __user *ptr)
	void __user *safe_ptr;
	void __user *safe_ptr;


	asm volatile(
	asm volatile(
	"	bics	xzr, %1, %2\n"
	"	bics	xzr, %3, %2\n"
	"	csel	%0, %1, xzr, eq\n"
	"	csel	%0, %1, xzr, eq\n"
	: "=&r" (safe_ptr)
	: "=&r" (safe_ptr)
	: "r" (ptr), "r" (current_thread_info()->addr_limit)
	: "r" (ptr), "r" (current_thread_info()->addr_limit),
	  "r" (untagged_addr(ptr))
	: "cc");
	: "cc");


	csdb();
	csdb();