Commit badea125 authored by David Mosberger-Tang, committed by Tony Luck

[IA64] Fix race in mm-context wrap-around logic.



The patch below should fix a race that could cause stale TLB entries.
Specifically, the race occurred when two CPUs raced to enter
wrap_mmu_context().  The losing CPU would find that, by the time it
acquired ctx.lock, mm->context already held a valid value, but it then
failed to re-check the delayed TLB-flushing logic and hence could end
up using a context number while stale entries were still in its TLB.
The fix is to check for delayed TLB flushes only after mm->context is
valid (non-zero).  The patch also makes GCC v4.x happier by defining a
non-volatile variant of mm_context_t, called nv_mm_context_t.

Signed-off-by: David Mosberger-Tang <David.Mosberger@acm.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
parent 7d69fa62
include/asm-ia64/mmu.h  +5 −3
@@ -2,10 +2,12 @@
 #define __MMU_H
 
 /*
- * Type for a context number.  We declare it volatile to ensure proper ordering when it's
- * accessed outside of spinlock'd critical sections (e.g., as done in activate_mm() and
- * init_new_context()).
+ * Type for a context number.  We declare it volatile to ensure proper
+ * ordering when it's accessed outside of spinlock'd critical sections
+ * (e.g., as done in activate_mm() and init_new_context()).
  */
 typedef volatile unsigned long mm_context_t;
 
+typedef unsigned long nv_mm_context_t;
+
 #endif
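The nv_mm_context_t addition sidesteps a GCC 4.x diagnostic about
volatile-qualified return types; presumably the warning in question is
"type qualifiers ignored on function return type".  A minimal
illustration of the difference (hypothetical reproducer, not from the
patch):

    typedef volatile unsigned long mm_context_t;
    typedef unsigned long nv_mm_context_t;

    mm_context_t bad(void);     /* gcc 4.x: type qualifiers ignored on
                                   function return type */
    nv_mm_context_t good(void); /* non-volatile variant: no complaint */

The qualifier matters on the object itself (mm->context), where it
forces ordered accesses outside the lock, but it is meaningless on an
rvalue returned from a function, which is why the signatures below
switch to the nv_ variant.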
include/asm-ia64/mmu_context.h  +32 −22
@@ -55,22 +55,27 @@ static inline void
 delayed_tlb_flush (void)
 {
 	extern void local_flush_tlb_all (void);
+	unsigned long flags;
 
 	if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
+		spin_lock_irqsave(&ia64_ctx.lock, flags);
+		{
+			if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
 				local_flush_tlb_all();
 				__ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
+			}
+		}
+		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
 	}
 }
 
-static inline mm_context_t
+static inline nv_mm_context_t
 get_mmu_context (struct mm_struct *mm)
 {
 	unsigned long flags;
-	mm_context_t context = mm->context;
-
-	if (context)
-		return context;
+	nv_mm_context_t context = mm->context;
+
+	if (unlikely(!context)) {
 		spin_lock_irqsave(&ia64_ctx.lock, flags);
 		{
 			/* re-check, now that we've got the lock: */
@@ -83,6 +88,13 @@ get_mmu_context (struct mm_struct *mm)
 			}
 		}
 		spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+	}
+	/*
+	 * Ensure we're not starting to use "context" before any old
+	 * uses of it are gone from our TLB.
+	 */
+	delayed_tlb_flush();
+
 	return context;
 }
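After this change, delayed_tlb_flush() and the slow path of
get_mmu_context() share the same double-checked shape: a cheap unlocked
test, then an authoritative re-test under ia64_ctx.lock before acting.
Distilled into a generic C11 sketch (illustrative only; the atomics
stand in for the per-CPU flag and the spinlock):

    #include <stdatomic.h>

    static atomic_int pending;                    /* ~ ia64_need_tlb_flush */
    static atomic_flag slock = ATOMIC_FLAG_INIT;  /* ~ ia64_ctx.lock */

    static void handle_pending(void (*work)(void))
    {
        if (atomic_load(&pending)) {              /* unlocked hint: may race */
            while (atomic_flag_test_and_set(&slock))
                ;                                 /* spin */
            if (atomic_load(&pending)) {          /* authoritative re-check */
                work();
                atomic_store(&pending, 0);
            }
            atomic_flag_clear(&slock);
        }
    }

The unlocked test keeps the common case (no pending flush, context
already valid) free of lock traffic; only the rare case pays for the
spinlock and the re-check.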

@@ -104,7 +116,7 @@ destroy_context (struct mm_struct *mm)
 }
 
 static inline void
-reload_context (mm_context_t context)
+reload_context (nv_mm_context_t context)
 {
 	unsigned long rid;
 	unsigned long rid_incr = 0;
@@ -138,7 +150,7 @@ reload_context (mm_context_t context)
 static inline void
 activate_context (struct mm_struct *mm)
 {
-	mm_context_t context;
+	nv_mm_context_t context;
 
 	do {
 		context = get_mmu_context(mm);
@@ -157,8 +169,6 @@ activate_context (struct mm_struct *mm)
 static inline void
 activate_mm (struct mm_struct *prev, struct mm_struct *next)
 {
-	delayed_tlb_flush();
-
 	/*
 	 * We may get interrupts here, but that's OK because interrupt handlers cannot
 	 * touch user-space.