Commit 23bdf86a authored by Lennert Buytenhek's avatar Lennert Buytenhek Committed by Russell King
Browse files

[ARM] 3377/2: add support for intel xsc3 core



Patch from Lennert Buytenhek

This patch adds support for the new XScale v3 core.  This is an
ARMv5 ISA core with the following additions:

- L2 cache
- I/O coherency support (on select chipsets)
- Low-Locality Reference cache attributes (replaces mini-cache)
- Supersections (v6 compatible)
- 36-bit addressing (v6 compatible)
- Single instruction cache line clean/invalidate
- LRU cache replacement (vs round-robin)

I attempted to merge the XSC3 support into proc-xscale.S, but XSC3
cores have separate errata and have to handle things like L2, so it
is simpler to keep it separate.

L2 cache support is currently a build option because the L2 enable
bit must be set before we enable the MMU and there is no easy way to
capture command line parameters at this point.

There are still optimizations that can be done such as using LLR for
copypage (in theory using the existing mini-cache code) but those
can be addressed down the road.

Signed-off-by: default avatarDeepak Saxena <dsaxena@plexity.net>
Signed-off-by: default avatarLennert Buytenhek <buytenh@wantstofly.org>
Signed-off-by: default avatarRussell King <rmk+kernel@arm.linux.org.uk>
parent de4533a0
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -57,6 +57,7 @@ tune-$(CONFIG_CPU_ARM926T) :=-mtune=arm9tdmi
tune-$(CONFIG_CPU_SA110)	:=-mtune=strongarm110
tune-$(CONFIG_CPU_SA1100)	:=-mtune=strongarm1100
tune-$(CONFIG_CPU_XSCALE)	:=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
tune-$(CONFIG_CPU_XSC3)		:=$(call cc-option,-mtune=xscale,-mtune=strongarm110) -Wa,-mcpu=xscale
tune-$(CONFIG_CPU_V6)		:=$(call cc-option,-mtune=arm1136j-s,-mtune=strongarm)

ifeq ($(CONFIG_AEABI),y)
+18 −1
Original line number Diff line number Diff line
@@ -239,6 +239,17 @@ config CPU_XSCALE
	select CPU_CACHE_VIVT
	select CPU_TLB_V4WBI

# XScale Core Version 3
config CPU_XSC3
	bool
	depends on ARCH_IXP23XX
	default y
	select CPU_32v5
	select CPU_ABRT_EV5T
	select CPU_CACHE_VIVT
	select CPU_TLB_V4WBI
	select IO_36

# ARMv6
config CPU_V6
	bool "Support ARM V6 processor"
@@ -361,11 +372,17 @@ config CPU_TLB_V4WBI
config CPU_TLB_V6
	bool

#
# CPU supports 36-bit I/O
#
config IO_36
	bool

comment "Processor Features"

config ARM_THUMB
	bool "Support Thumb user binaries"
	depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_V6
	depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_V6
	default y
	help
	  Say Y if you want to include kernel support for running user space
+2 −0
Original line number Diff line number Diff line
@@ -30,6 +30,7 @@ obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o
obj-$(CONFIG_CPU_COPY_V6)	+= copypage-v6.o mmu.o
obj-$(CONFIG_CPU_SA1100)	+= copypage-v4mc.o
obj-$(CONFIG_CPU_XSCALE)	+= copypage-xscale.o
obj-$(CONFIG_CPU_XSC3)		+= copypage-xsc3.o

obj-$(CONFIG_CPU_TLB_V3)	+= tlb-v3.o
obj-$(CONFIG_CPU_TLB_V4WT)	+= tlb-v4.o
@@ -51,4 +52,5 @@ obj-$(CONFIG_CPU_ARM1026) += proc-arm1026.o
obj-$(CONFIG_CPU_SA110)		+= proc-sa110.o
obj-$(CONFIG_CPU_SA1100)	+= proc-sa1100.o
obj-$(CONFIG_CPU_XSCALE)	+= proc-xscale.o
obj-$(CONFIG_CPU_XSC3)		+= proc-xsc3.o
obj-$(CONFIG_CPU_V6)		+= proc-v6.o
+97 −0
Original line number Diff line number Diff line
/*
 *  linux/arch/arm/lib/copypage-xsc3.S
 *
 *  Copyright (C) 2004 Intel Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Adapted for 3rd gen XScale core, no more mini-dcache
 * Author: Matt Gilbert (matthew.m.gilbert@intel.com)
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/asm-offsets.h>

/*
 * General note:
 *  We don't really want write-allocate cache behaviour for these functions
 *  since that will just eat through 8K of the cache.
 */

	.text
	.align	5
/*
 * XSC3 optimised copy_user_page
 *  r0 = destination
 *  r1 = source
 *  r2 = virtual user address of ultimate destination page
 *
 * The source page may have some clean entries in the cache already, but we
 * can safely ignore them - break_cow() will flush them out of the cache
 * if we eventually end up using our copied page.
 *
 * Strategy: copy 64 bytes (two 32-byte cache lines) per loop pass using
 * ldrd/strd pairs, prefetching the source two lines ahead, and invalidating
 * each destination line just before it is fully overwritten so the stores
 * allocate the line without first fetching stale data from memory.
 *
 * ldrd/strd require an even/odd register pair, hence the r2/r3 and r4/r5
 * pairs below (r2 holding the incoming user address is dead by then).
 */
ENTRY(xsc3_mc_copy_user_page)
	stmfd	sp!, {r4, r5, lr}		@ r4/r5 are callee-saved, lr is the loop counter
	mov	lr, #PAGE_SZ/64-1		@ passes with look-ahead prefetch; one final pass via 2b

	pld	[r1, #0]			@ prime the prefetch pipeline with the
	pld	[r1, #32]			@ first two source lines
1:	pld	[r1, #64]			@ prefetch two lines ahead of the copy
	pld	[r1, #96]

2:	ldrd	r2, [r1], #8			@ r2/r3 = first 8 source bytes of this line
	mov	ip, r0				@ ip = dest line address before r0 advances
	ldrd	r4, [r1], #8			@ r4/r5 = next 8 source bytes
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate dest line: avoid the read-for-write
	strd	r2, [r0], #8			@ ... since the whole 32-byte line is rewritten
	ldrd	r2, [r1], #8
	strd	r4, [r0], #8
	ldrd	r4, [r1], #8
	strd	r2, [r0], #8
	strd	r4, [r0], #8			@ first 32-byte line done
	ldrd	r2, [r1], #8			@ second line of this 64-byte pass
	mov	ip, r0				@ ip = next dest line address
	ldrd	r4, [r1], #8
	mcr	p15, 0, ip, c7, c6, 1		@ invalidate second dest line
	strd	r2, [r0], #8
	ldrd	r2, [r1], #8
	subs	lr, lr, #1			@ decrement early to hide the ALU latency
	strd	r4, [r0], #8
	ldrd	r4, [r1], #8
	strd	r2, [r0], #8
	strd	r4, [r0], #8			@ second 32-byte line done
	bgt	1b				@ more passes: keep prefetching ahead
	beq	2b				@ last pass: no prefetch (would run past the page)

	ldmfd	sp!, {r4, r5, pc}		@ restore and return (pc <- saved lr)

	.align	5
/*
 * XSC3 optimised clear_user_page
 *  r0 = destination
 *  r1 = virtual user address of ultimate destination page
 *
 * Zeroes one page, one 32-byte cache line per loop pass.  Each line is
 * invalidated before being written so the stores allocate it in the cache
 * without fetching the old contents from memory first.
 */
ENTRY(xsc3_mc_clear_user_page)
	mov	r1, #PAGE_SZ/32			@ r1 = loop count, one cache line per pass
	mov	r2, #0				@ r2/r3 = 64-bit zero for strd
	mov	r3, #0
1:	mcr	p15, 0, r0, c7, c6, 1		@ invalidate line (allocate without read)
	strd	r2, [r0], #8			@ four 8-byte stores = one full 32-byte line
	strd	r2, [r0], #8
	strd	r2, [r0], #8
	strd	r2, [r0], #8
	subs	r1, r1, #1			@ next line, loop until the page is done
	bne	1b
	mov	pc, lr				@ return (leaf: lr untouched)

	__INITDATA

/*
 * Per-CPU user-page operations table picked up by the processor glue.
 * NOTE(review): entry order (clear, then copy) must match the layout of
 * struct cpu_user_fns in the kernel headers — verify against that struct
 * before reordering.
 */
	.type	xsc3_mc_user_fns, #object
ENTRY(xsc3_mc_user_fns)
	.long	xsc3_mc_clear_user_page
	.long	xsc3_mc_copy_user_page
	.size	xsc3_mc_user_fns, . - xsc3_mc_user_fns
+2 −1
Original line number Diff line number Diff line
@@ -557,7 +557,8 @@ void __init create_mapping(struct map_desc *md)
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
	if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
		&& domain == 0) {
		/*
		 * Align to supersection boundary if !high pages.
		 * High pages have already been checked for proper
Loading