Commit 963285b0 authored by Johannes Berg, committed by Richard Weinberger
Browse files

um: support some of ARCH_HAS_SET_MEMORY



For now, only support set_memory_ro()/rw() which we need for
the stack protection in the next patch.

Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
parent 58b09f68
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -15,6 +15,7 @@ config UML
	select HAVE_DEBUG_KMEMLEAK
	select HAVE_DEBUG_BUGVERBOSE
	select NO_DMA
	select ARCH_HAS_SET_MEMORY
	select GENERIC_IRQ_SHOW
	select GENERIC_CPU_DEVICES
	select GENERIC_CLOCKEVENTS
+3 −0
Original line number Diff line number Diff line
@@ -55,12 +55,15 @@ extern unsigned long end_iomem;
/* Bits preserved when a PTE's protection is changed via pte_modify(). */
#define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
/* Writable kernel mapping (UML has no execute restriction here). */
#define __PAGE_KERNEL_EXEC                                              \
	 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
/* Read-only kernel mapping: __PAGE_KERNEL_EXEC without _PAGE_RW. */
#define __PAGE_KERNEL_RO						\
	 (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_NONE	__pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_COPY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_KERNEL	__pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_EXEC	__pgprot(__PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RO		__pgprot(__PAGE_KERNEL_RO)

/*
 * The i386 can't do page protection for execute, and considers that the same
+1 −0
Original line number Diff line number Diff line
#include <asm-generic/set_memory.h>
+54 −0
Original line number Diff line number Diff line
@@ -608,3 +608,57 @@ void force_flush_all(void)
		vma = vma->vm_next;
	}
}

/*
 * Bit masks passed to change_page_range() via apply_to_page_range():
 * clear_mask bits are removed from each PTE before set_mask bits are added.
 */
struct page_change_data {
	unsigned int set_mask, clear_mask;
};

/*
 * apply_to_page_range() callback: update the protection bits of one PTE.
 * @data points at a struct page_change_data; bits in clear_mask are
 * dropped before bits in set_mask are added, then the PTE is written back.
 * Always returns 0 so the walk covers the whole range.
 */
static int change_page_range(pte_t *ptep, unsigned long addr, void *data)
{
	struct page_change_data *masks = data;
	pte_t entry = READ_ONCE(*ptep);

	/* Clear first, so a bit present in both masks ends up set. */
	pte_clear_bits(entry, masks->clear_mask);
	pte_set_bits(entry, masks->set_mask);
	set_pte(ptep, entry);

	return 0;
}

static int change_memory(unsigned long start, unsigned long pages,
			 unsigned int set_mask, unsigned int clear_mask)
{
	unsigned long size = pages * PAGE_SIZE;
	struct page_change_data data;
	int ret;

	data.set_mask = set_mask;
	data.clear_mask = clear_mask;

	ret = apply_to_page_range(&init_mm, start, size, change_page_range,
				  &data);

	flush_tlb_kernel_range(start, start + size);

	return ret;
}

/*
 * Mark @numpages pages starting at @addr read-only by clearing _PAGE_RW.
 * Returns 0 on success or a negative error from change_memory().
 */
int set_memory_ro(unsigned long addr, int numpages)
{
	return change_memory(addr, numpages, 0, _PAGE_RW);
}

/*
 * Mark @numpages pages starting at @addr read-write by setting _PAGE_RW.
 * Returns 0 on success or a negative error from change_memory().
 */
int set_memory_rw(unsigned long addr, int numpages)
{
	return change_memory(addr, numpages, _PAGE_RW, 0);
}

/* Not implemented on UML: always fails with -EOPNOTSUPP. */
int set_memory_nx(unsigned long addr, int numpages)
{
	return -EOPNOTSUPP;
}

/* Not implemented on UML: always fails with -EOPNOTSUPP. */
int set_memory_x(unsigned long addr, int numpages)
{
	return -EOPNOTSUPP;
}