Commit 4dd5b673 authored by John David Anglin, committed by Helge Deller
Browse files

parisc: Purge TLB entries after updating page table entry and set page accessed flag in TLB handler



This patch may resolve some races in TLB handling.  Hopefully, TLB
inserts are accesses protected by the spin lock.

If not, we may need to use IPI calls and do local purges on PA 2.0.

Signed-off-by: John David Anglin <dave.anglin@bell.net>
Signed-off-by: Helge Deller <deller@gmx.de>
parent d27dfa13
Loading
Loading
Loading
Loading
+12 −12
Original line number Diff line number Diff line
@@ -66,9 +66,9 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)
		unsigned long flags;				\
		spin_lock_irqsave(&pa_tlb_lock, flags);		\
		old_pte = *ptep;				\
		set_pte(ptep, pteval);				\
		if (pte_inserted(old_pte))			\
			purge_tlb_entries(mm, addr);		\
		set_pte(ptep, pteval);				\
		spin_unlock_irqrestore(&pa_tlb_lock, flags);	\
	} while (0)

@@ -227,22 +227,22 @@ static inline void purge_tlb_entries(struct mm_struct *mm, unsigned long addr)

#ifndef __ASSEMBLY__

#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
#define PAGE_NONE	__pgprot(_PAGE_PRESENT | _PAGE_USER)
#define PAGE_SHARED	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE)
/* Others seem to make this executable, I don't know if that's correct
   or not.  The stack is mapped this way though so this is necessary
   in the short term - dhd@linuxcare.com, 2000-08-08 */
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
#define PAGE_WRITEONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE | _PAGE_ACCESSED)
#define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
#define PAGE_READONLY	__pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ)
#define PAGE_WRITEONLY  __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_WRITE)
#define PAGE_EXECREAD   __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC)
#define PAGE_COPY       PAGE_EXECREAD
#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
#define PAGE_RWX        __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
#define PAGE_KERNEL	__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_RWX	__pgprot(_PAGE_KERNEL_RWX)
#define PAGE_KERNEL_RO	__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_UNC	__pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
#define PAGE_GATEWAY    __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_GATEWAY| _PAGE_READ)


/*
@@ -479,8 +479,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
		spin_unlock_irqrestore(&pa_tlb_lock, flags);
		return 0;
	}
	purge_tlb_entries(vma->vm_mm, addr);
	set_pte(ptep, pte_mkold(pte));
	purge_tlb_entries(vma->vm_mm, addr);
	spin_unlock_irqrestore(&pa_tlb_lock, flags);
	return 1;
}
@@ -493,9 +493,9 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,

	spin_lock_irqsave(&pa_tlb_lock, flags);
	old_pte = *ptep;
	set_pte(ptep, __pte(0));
	if (pte_inserted(old_pte))
		purge_tlb_entries(mm, addr);
	set_pte(ptep, __pte(0));
	spin_unlock_irqrestore(&pa_tlb_lock, flags);

	return old_pte;
@@ -505,8 +505,8 @@ static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr,
{
	unsigned long flags;
	spin_lock_irqsave(&pa_tlb_lock, flags);
	purge_tlb_entries(mm, addr);
	set_pte(ptep, pte_wrprotect(*ptep));
	purge_tlb_entries(mm, addr);
	spin_unlock_irqrestore(&pa_tlb_lock, flags);
}

+1 −3
Original line number Diff line number Diff line
@@ -483,9 +483,7 @@
	.macro		tlb_unlock0	spc,tmp
#ifdef CONFIG_SMP
	or,COND(=)	%r0,\spc,%r0
	sync
	or,COND(=)	%r0,\spc,%r0
	stw             \spc,0(\tmp)
	stw,ma		\spc,0(\tmp)
#endif
	.endm