Commit 997153b9 authored by Guo Ren

csky: Add flush_icache_mm to defer flush icache all



Some CPUs don't support the icache.va instruction for maintaining the
icache of all SMP cores. Using icache.all plus an IPI costs a lot in
performance, so a deferred-flush mechanism is used to reduce the number
of whole-icache invalidations (local_icache_inv_all calls).

Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
parent cc1f6563
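In sketch, the deferred flush added by this commit works like this: when code is modified for an mm on a core without icache.va, every core is marked stale in a per-mm cpumask, the writing core invalidates its own icache immediately (cores currently running the mm are invalidated via IPI), and any other core invalidates lazily the next time it picks up the mm. The following minimal userspace model shows that pattern; struct mm, NR_CPUS and icache_inv_all_local() are illustrative stand-ins, not the kernel's API.

/*
 * Minimal userspace model of the deferred icache flush in the diff
 * below. All names here are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_CPUS 4

struct mm {
	/* models mm->context.icache_stale_mask */
	bool icache_stale[NR_CPUS];
};

static void icache_inv_all_local(int cpu)
{
	printf("cpu%d: invalidate whole icache\n", cpu);
}

/* Writer side: models flush_icache_mm_range() without icache.va. */
static void mark_and_flush(struct mm *mm, int this_cpu)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		mm->icache_stale[cpu] = true;	/* mark every core stale */

	mm->icache_stale[this_cpu] = false;	/* this core flushes now */
	icache_inv_all_local(this_cpu);
	/*
	 * Cores currently running this mm would be flushed via IPI;
	 * the rest are left for the deferred path below.
	 */
}

/* Consumer side: models flush_icache_deferred(), e.g. on switch-in. */
static void flush_deferred(struct mm *mm, int cpu)
{
	if (mm->icache_stale[cpu]) {
		mm->icache_stale[cpu] = false;
		icache_inv_all_local(cpu);	/* pay only when needed */
	}
}

int main(void)
{
	struct mm mm = { { false } };

	mark_and_flush(&mm, 0);		/* cpu0 modifies code for this mm */
	flush_deferred(&mm, 2);		/* cpu2 switches in: one flush */
	flush_deferred(&mm, 2);		/* already clean: no flush */
	return 0;
}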
+2 −0
@@ -48,6 +48,8 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, u

#define flush_icache_page(vma, page)		do {} while (0)
#define flush_icache_range(start, end)		cache_wbinv_range(start, end)
#define flush_icache_mm_range(mm, start, end)	cache_wbinv_range(start, end)
#define flush_icache_deferred(mm)		do {} while (0)

#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
+55 −0
@@ -28,3 +28,58 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,

	kunmap_atomic((void *) addr);
}

void flush_icache_deferred(struct mm_struct *mm)
{
	unsigned int cpu = smp_processor_id();
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with a barrier in flush_icache_mm.
		 */
		smp_mb();
		local_icache_inv_all(NULL);
	}
}

void flush_icache_mm_range(struct mm_struct *mm,
		unsigned long start, unsigned long end)
{
	unsigned int cpu;
	cpumask_t others, *mask;

	preempt_disable();

#ifdef CONFIG_CPU_HAS_ICACHE_INS
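	/*
	 * icache.va broadcasts the invalidation to every core, so no
	 * IPI and no deferral are needed for the current mm.
	 */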
	if (mm == current->mm) {
		icache_inv_range(start, end);
		preempt_enable();
		return;
	}
#endif

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);

	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_icache_inv_all(NULL);

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));

	if (mm != current->active_mm || !cpumask_empty(&others)) {
		on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
		cpumask_clear(mask);
	}

	preempt_enable();
}
+11 −3
@@ -31,15 +31,23 @@ static inline void flush_dcache_page(struct page *page)

#define flush_icache_range(start, end)		cache_wbinv_range(start, end)

void flush_icache_mm_range(struct mm_struct *mm,
			unsigned long start, unsigned long end);
void flush_icache_deferred(struct mm_struct *mm);

#define flush_cache_vmap(start, end)		do { } while (0)
#define flush_cache_vunmap(start, end)		do { } while (0)

 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
 do { \
 	memcpy(dst, src, len); \
-	if (vma->vm_flags & VM_EXEC) \
-		cache_wbinv_range((unsigned long)dst, \
-				(unsigned long)dst + len); \
+	if (vma->vm_flags & VM_EXEC) { \
+		dcache_wb_range((unsigned long)dst, \
+				(unsigned long)dst + len); \
+		flush_icache_mm_range(current->mm, \
+				(unsigned long)dst, \
+				(unsigned long)dst + len); \
+	} \
 } while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
+1 −0
@@ -4,6 +4,7 @@
#ifndef __ASM_CSKY_CACHEFLUSH_H
#define __ASM_CSKY_CACHEFLUSH_H

#include <linux/mm.h>
#include <abi/cacheflush.h>

#endif /* __ASM_CSKY_CACHEFLUSH_H */
+1 −0
@@ -7,6 +7,7 @@
typedef struct {
	atomic64_t	asid;
	void *vdso;
	cpumask_t	icache_stale_mask;
} mm_context_t;

#endif /* __ASM_CSKY_MMU_H */
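The excerpt above does not show the consumer of flush_icache_deferred(). In the analogous RISC-V deferred-flush code it is called from switch_mm(), so a core marked stale pays for at most one whole-icache invalidation before it next executes user code from the mm. A hedged sketch of such a call site follows, with stub types standing in for the kernel's; this hunk is an assumption, not part of the diff above.

/*
 * Hypothetical call site for flush_icache_deferred(), modeled on the
 * analogous RISC-V code; not part of the diff above. Only
 * flush_icache_deferred() itself comes from this commit.
 */
struct mm_struct;					/* opaque stand-in */
void flush_icache_deferred(struct mm_struct *mm);	/* from the diff */

static inline void switch_mm_sketch(struct mm_struct *prev,
				    struct mm_struct *next)
{
	if (prev != next) {
		/* arch-specific ASID/page-table switching elided */
	}

	/*
	 * If flush_icache_mm_range() marked this core's icache stale
	 * while another mm was running here, invalidate it exactly once
	 * before user code from 'next' executes.
	 */
	flush_icache_deferred(next);
}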