Commit 5ad272ab authored by Peter Zijlstra, committed by Geert Uytterhoeven

m68k: mm: Move the pointer table allocator to motorola.c



Only the Motorola MMU makes use of this allocator; including it for
Sun3/ColdFire is a waste of .text. This also avoids build issues when
we later make the allocator more Motorola specific.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Will Deacon <will@kernel.org>
Acked-by: Greg Ungerer <gerg@linux-m68k.org>
Tested-by: Michael Schmitz <schmitzmic@gmail.com>
Tested-by: Greg Ungerer <gerg@linux-m68k.org>
Link: https://lore.kernel.org/r/20200131125403.654652162@infradead.org


Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
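
As a rough illustration of the scheme being moved (not part of this commit):
one byte of mark bits per page tracks its eight pointer-table slots, with a
set bit meaning "free", cf. PD_MARKBITS() in the diff below. The following is
a standalone userspace sketch only; the 4 KiB page and 512-byte table sizes
are assumptions, and get_slot()/free_slot() are made-up names.

/*
 * Illustrative userspace sketch, not kernel code: one byte of mark bits
 * tracks the eight PTABLE_SIZE slots of a single page, 1 == free.
 */
#include <stdio.h>

#define PAGE_SIZE	4096u
#define PTABLE_SIZE	512u			/* 8 tables per page (assumed) */

static unsigned char markbits = 0xff;		/* one bit per slot, 1 == free */

/* Hand out the first free slot; return its byte offset, or -1 if none. */
static int get_slot(void)
{
	unsigned char tmp;
	unsigned int off;

	for (tmp = 1, off = 0; off < PAGE_SIZE; tmp <<= 1, off += PTABLE_SIZE) {
		if (markbits & tmp) {
			markbits &= ~tmp;	/* mark slot as in use */
			return off;
		}
	}
	return -1;				/* page full */
}

/* Return the slot at the given byte offset to the pool. */
static void free_slot(unsigned int off)
{
	markbits |= 1u << (off / PTABLE_SIZE);
}

int main(void)
{
	int a = get_slot();
	int b = get_slot();

	printf("got offsets %d and %d, markbits now 0x%02x\n", a, b, markbits);
	free_slot(a);
	printf("freed %d, markbits 0x%02x\n", a, markbits);
	return 0;
}

In the real code there is no separate bookkeeping allocation: the mark byte
lives in the otherwise unused page->index field of the backing struct page,
and partially used pages are chained on ptable_list via page->lru.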
parent 13076a29
arch/m68k/mm/memory.c: +0 −102
@@ -22,108 +22,6 @@
#include <asm/machdep.h>


/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;
static LIST_HEAD(ptable_list);

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))

void __init init_pointer_table(unsigned long ptable)
{
	ptable_desc *dp;
	unsigned long page = ptable & PAGE_MASK;
	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = 0xff;
		list_add(dp, &ptable_list);
	}

	PD_MARKBITS(dp) &= ~mask;
	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

	/* unreserve the page so it's possible to free that page */
	__ClearPageReserved(PD_PAGE(dp));
	init_page_count(PD_PAGE(dp));

	return;
}

pmd_t *get_pointer_table (void)
{
	ptable_desc *dp = ptable_list.next;
	unsigned char mask = PD_MARKBITS (dp);
	unsigned char tmp;
	unsigned int off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		mmu_page_ctor(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = 0xfe;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list);
	}
	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
}

int free_pointer_table (pmd_t *ptable)
{
	ptable_desc *dp;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == 0xff) {
		/* all tables in page are free, free page */
		list_del(dp);
		mmu_page_dtor((void *)page);
		free_page (page);
		return 1;
	} else if (ptable_list.next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list);
	}
	return 0;
}

/* invalidate page in both caches */
static inline void clear040(unsigned long paddr)
{
arch/m68k/mm/motorola.c: +102 −0
@@ -67,6 +67,108 @@ void mmu_page_dtor(void *page)
	cache_page(page);
}

/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;
static LIST_HEAD(ptable_list);

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned char *)&PD_PAGE(dp)->index)

#define PTABLE_SIZE (PTRS_PER_PMD * sizeof(pmd_t))

void __init init_pointer_table(unsigned long ptable)
{
	ptable_desc *dp;
	unsigned long page = ptable & PAGE_MASK;
	unsigned char mask = 1 << ((ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = 0xff;
		list_add(dp, &ptable_list);
	}

	PD_MARKBITS(dp) &= ~mask;
	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

	/* unreserve the page so it's possible to free that page */
	__ClearPageReserved(PD_PAGE(dp));
	init_page_count(PD_PAGE(dp));

	return;
}

pmd_t *get_pointer_table (void)
{
	ptable_desc *dp = ptable_list.next;
	unsigned char mask = PD_MARKBITS (dp);
	unsigned char tmp;
	unsigned int off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		mmu_page_ctor(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = 0xfe;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += PTABLE_SIZE)
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list);
	}
	return (pmd_t *) (page_address(PD_PAGE(dp)) + off);
}

int free_pointer_table (pmd_t *ptable)
{
	ptable_desc *dp;
	unsigned long page = (unsigned long)ptable & PAGE_MASK;
	unsigned char mask = 1 << (((unsigned long)ptable - page)/PTABLE_SIZE);

	dp = PD_PTABLE(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == 0xff) {
		/* all tables in page are free, free page */
		list_del(dp);
		mmu_page_dtor((void *)page);
		free_page (page);
		return 1;
	} else if (ptable_list.next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables.
		 */
		list_move(dp, &ptable_list);
	}
	return 0;
}

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;