Commit 116d7533 authored by John David Anglin's avatar John David Anglin Committed by Helge Deller
Browse files

parisc: Use lpa instruction to load physical addresses in driver code



Most I/O in the kernel is done using the kernel offset mapping.
However, there is one API that uses aliased kernel address ranges:

> The final category of APIs is for I/O to deliberately aliased address
> ranges inside the kernel.  Such aliases are set up by use of the
> vmap/vmalloc API.  Since kernel I/O goes via physical pages, the I/O
> subsystem assumes that the user mapping and kernel offset mapping are
> the only aliases.  This isn't true for vmap aliases, so anything in
> the kernel trying to do I/O to vmap areas must manually manage
> coherency.  It must do this by flushing the vmap range before doing
> I/O and invalidating it after the I/O returns.

For this reason, we should use the hardware lpa instruction to load the
physical address of kernel virtual addresses in the driver code.

I believe we only use the vmap/vmalloc API with old PA 1.x processors,
which don't have an SBA, so we don't hit this problem.

Tested on c3750, c8000 and rp3440.

Signed-off-by: default avatarJohn David Anglin <dave.anglin@bell.net>
Signed-off-by: default avatarHelge Deller <deller@gmx.de>
parent ec13c82d
Loading
Loading
Loading
Loading
+24 −0
Original line number Diff line number Diff line
@@ -2,6 +2,30 @@
#ifndef __PARISC_SPECIAL_INSNS_H
#define __PARISC_SPECIAL_INSNS_H

/*
 * lpa(va) - translate kernel virtual address @va to a physical address
 * using the PA-RISC "lpa" (Load Physical Address) instruction.
 *
 * pa is first zeroed ("copy %r0,%0") before the lpa writes the
 * translation into it.  NOTE(review): presumably a failed translation
 * nullifies the write so pa stays 0 -- confirm against the PA-RISC
 * architecture manual.
 *
 * The "memory" clobber keeps the compiler from moving memory accesses
 * across the translation probe.
 */
#define lpa(va)	({			\
	unsigned long pa;		\
	__asm__ __volatile__(		\
		"copy %%r0,%0\n\t"	\
		"lpa %%r0(%1),%0"	\
		: "=r" (pa)		\
		: "r" (va)		\
		: "memory"		\
	);				\
	pa;				\
})

/*
 * lpa_user(va) - translate user virtual address @va to a physical
 * address with the "lpa" instruction, qualified by space register
 * %sr3.  NOTE(review): sr3 is presumably the user space id register
 * on parisc -- verify against the arch's space-register conventions.
 *
 * pa is first zeroed ("copy %r0,%0") before the lpa writes the
 * translation into it; a failed translation presumably leaves pa == 0
 * via nullification -- confirm against the PA-RISC architecture manual.
 *
 * The "memory" clobber keeps the compiler from moving memory accesses
 * across the translation probe.
 */
#define lpa_user(va)	({		\
	unsigned long pa;		\
	__asm__ __volatile__(		\
		"copy %%r0,%0\n\t"	\
		"lpa %%r0(%%sr3,%1),%0"	\
		: "=r" (pa)		\
		: "r" (va)		\
		: "memory"		\
	);				\
	pa;				\
})

#define mfctl(reg)	({		\
	unsigned long cr;		\
	__asm__ __volatile__(		\
+1 −1
Original line number Diff line number Diff line
@@ -570,7 +570,7 @@ ccio_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
	** "hints" parm includes the VALID bit!
	** "dep" clobbers the physical address offset bits as well.
	*/
	pa = virt_to_phys(vba);
	pa = lpa(vba);
	asm volatile("depw  %1,31,12,%0" : "+r" (pa) : "r" (hints));
	((u32 *)pdir_ptr)[1] = (u32) pa;

+1 −1
Original line number Diff line number Diff line
@@ -572,7 +572,7 @@ sba_io_pdir_entry(u64 *pdir_ptr, space_t sid, unsigned long vba,
	u64 pa; /* physical address */
	register unsigned ci; /* coherent index */

	pa = virt_to_phys(vba);
	pa = lpa(vba);
	pa &= IOVP_MASK;

	asm("lci 0(%1), %0" : "=r" (ci) : "r" (vba));