Commit c2ce6f9f authored by Anton Blanchard, committed by Michael Ellerman
Browse files

powerpc: Change vrX register defines to vX to match gcc and glibc



As our various loops (copy, string, crypto etc) get more complicated,
we want to share implementations between userspace (eg glibc) and
the kernel. We also want to write userspace test harnesses to put
in tools/testing/selftest.

One gratuitous difference between userspace and the kernel is the
VMX register definitions - the kernel uses vrX whereas both gcc and
glibc use vX.

Change the kernel to match userspace.

Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 06e5801b
Loading
Loading
Loading
Loading
+32 −32
Original line number Diff line number Diff line
@@ -637,38 +637,38 @@ END_FTR_SECTION_NESTED(CPU_FTR_HAS_PPR,CPU_FTR_HAS_PPR,945)

/* AltiVec Registers (VPRs) */

#define	vr0	0
#define	vr1	1
#define	vr2	2
#define	vr3	3
#define	vr4	4
#define	vr5	5
#define	vr6	6
#define	vr7	7
#define	vr8	8
#define	vr9	9
#define	vr10	10
#define	vr11	11
#define	vr12	12
#define	vr13	13
#define	vr14	14
#define	vr15	15
#define	vr16	16
#define	vr17	17
#define	vr18	18
#define	vr19	19
#define	vr20	20
#define	vr21	21
#define	vr22	22
#define	vr23	23
#define	vr24	24
#define	vr25	25
#define	vr26	26
#define	vr27	27
#define	vr28	28
#define	vr29	29
#define	vr30	30
#define	vr31	31
#define	v0	0
#define	v1	1
#define	v2	2
#define	v3	3
#define	v4	4
#define	v5	5
#define	v6	6
#define	v7	7
#define	v8	8
#define	v9	9
#define	v10	10
#define	v11	11
#define	v12	12
#define	v13	13
#define	v14	14
#define	v15	15
#define	v16	16
#define	v17	17
#define	v18	18
#define	v19	19
#define	v20	20
#define	v21	21
#define	v22	22
#define	v23	23
#define	v24	24
#define	v25	25
#define	v26	26
#define	v27	27
#define	v28	28
#define	v29	29
#define	v30	30
#define	v31	31

/* VSX Registers (VSRs) */

+1 −1
Original line number Diff line number Diff line
@@ -136,7 +136,7 @@ struct pt_regs {
#endif /* __powerpc64__ */

/*
 * Get/set all the altivec registers vr0..vr31, vscr, vrsave, in one go.
 * Get/set all the altivec registers v0..v31, vscr, vrsave, in one go.
 * The transfer totals 34 quadword.  Quadwords 0-31 contain the
 * corresponding vector registers.  Quadword 32 contains the vscr as the
 * last word (offset 12) within that quadword.  Quadword 33 contains the
+4 −4
Original line number Diff line number Diff line
@@ -152,9 +152,9 @@ _GLOBAL(tm_reclaim)

	addi	r7, r3, THREAD_TRANSACT_VRSTATE
	SAVE_32VRS(0, r6, r7)	/* r6 scratch, r7 transact vr state */
	mfvscr	vr0
	mfvscr	v0
	li	r6, VRSTATE_VSCR
	stvx	vr0, r7, r6
	stvx	v0, r7, r6
dont_backup_vec:
	mfspr	r0, SPRN_VRSAVE
	std	r0, THREAD_TRANSACT_VRSAVE(r3)
@@ -359,8 +359,8 @@ _GLOBAL(__tm_recheckpoint)

	addi	r8, r3, THREAD_VRSTATE
	li	r5, VRSTATE_VSCR
	lvx	vr0, r8, r5
	mtvscr	vr0
	lvx	v0, r8, r5
	mtvscr	v0
	REST_32VRS(0, r5, r8)			/* r5 scratch, r8 ptr */
dont_restore_vec:
	ld	r5, THREAD_VRSAVE(r3)
+12 −12
Original line number Diff line number Diff line
@@ -24,8 +24,8 @@ _GLOBAL(do_load_up_transact_altivec)
	stw	r4,THREAD_USED_VR(r3)

	li	r10,THREAD_TRANSACT_VRSTATE+VRSTATE_VSCR
	lvx	vr0,r10,r3
	mtvscr	vr0
	lvx	v0,r10,r3
	mtvscr	v0
	addi	r10,r3,THREAD_TRANSACT_VRSTATE
	REST_32VRS(0,r4,r10)

@@ -52,8 +52,8 @@ _GLOBAL(vec_enable)
 */
_GLOBAL(load_vr_state)
	li	r4,VRSTATE_VSCR
	lvx	vr0,r4,r3
	mtvscr	vr0
	lvx	v0,r4,r3
	mtvscr	v0
	REST_32VRS(0,r4,r3)
	blr

@@ -63,9 +63,9 @@ _GLOBAL(load_vr_state)
 */
_GLOBAL(store_vr_state)
	SAVE_32VRS(0, r4, r3)
	mfvscr	vr0
	mfvscr	v0
	li	r4, VRSTATE_VSCR
	stvx	vr0, r4, r3
	stvx	v0, r4, r3
	blr

/*
@@ -104,9 +104,9 @@ _GLOBAL(load_up_altivec)
	addi	r4,r4,THREAD
	addi	r6,r4,THREAD_VRSTATE
	SAVE_32VRS(0,r5,r6)
	mfvscr	vr0
	mfvscr	v0
	li	r10,VRSTATE_VSCR
	stvx	vr0,r10,r6
	stvx	v0,r10,r6
	/* Disable VMX for last_task_used_altivec */
	PPC_LL	r5,PT_REGS(r4)
	toreal(r5)
@@ -142,8 +142,8 @@ _GLOBAL(load_up_altivec)
	li	r4,1
	li	r10,VRSTATE_VSCR
	stw	r4,THREAD_USED_VR(r5)
	lvx	vr0,r10,r6
	mtvscr	vr0
	lvx	v0,r10,r6
	mtvscr	v0
	REST_32VRS(0,r4,r6)
#ifndef CONFIG_SMP
	/* Update last_task_used_altivec to 'current' */
@@ -186,9 +186,9 @@ _GLOBAL(giveup_altivec)
	addi	r7,r3,THREAD_VRSTATE
2:	PPC_LCMPI	0,r5,0
	SAVE_32VRS(0,r4,r7)
	mfvscr	vr0
	mfvscr	v0
	li	r4,VRSTATE_VSCR
	stvx	vr0,r4,r7
	stvx	v0,r4,r7
	beq	1f
	PPC_LL	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
#ifdef CONFIG_VSX
+16 −16
Original line number Diff line number Diff line
@@ -83,23 +83,23 @@ _GLOBAL(copypage_power7)
	li	r12,112

	.align	5
1:	lvx	vr7,r0,r4
	lvx	vr6,r4,r6
	lvx	vr5,r4,r7
	lvx	vr4,r4,r8
	lvx	vr3,r4,r9
	lvx	vr2,r4,r10
	lvx	vr1,r4,r11
	lvx	vr0,r4,r12
1:	lvx	v7,r0,r4
	lvx	v6,r4,r6
	lvx	v5,r4,r7
	lvx	v4,r4,r8
	lvx	v3,r4,r9
	lvx	v2,r4,r10
	lvx	v1,r4,r11
	lvx	v0,r4,r12
	addi	r4,r4,128
	stvx	vr7,r0,r3
	stvx	vr6,r3,r6
	stvx	vr5,r3,r7
	stvx	vr4,r3,r8
	stvx	vr3,r3,r9
	stvx	vr2,r3,r10
	stvx	vr1,r3,r11
	stvx	vr0,r3,r12
	stvx	v7,r0,r3
	stvx	v6,r3,r6
	stvx	v5,r3,r7
	stvx	v4,r3,r8
	stvx	v3,r3,r9
	stvx	v2,r3,r10
	stvx	v1,r3,r11
	stvx	v0,r3,r12
	addi	r3,r3,128
	bdnz	1b

Loading