Commit a023748d authored by Linus Torvalds

Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mm tree changes from Ingo Molnar:
 "The biggest change is full PAT support from Jürgen Gross:

     Via the PAT (Page Attribute Table), the x86 architecture offers a
     way to specify different caching modes in page table entries.  The
     PAT MSR contains 8 entries, each specifying one of 6 possible cache
     modes.  A pte references one of those entries via 3 bits:
     _PAGE_PAT, _PAGE_PWT and _PAGE_PCD.
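
     (For illustration only, not code from this series: a minimal
     standalone sketch of how those three bits form the index into
     the PAT MSR.  _PAGE_PWT supplies bit 0 of the entry number,
     _PAGE_PCD bit 1 and _PAGE_PAT bit 2; the helper name below is
     made up.)

      #include <stdint.h>
      #include <stdio.h>

      /* x86 pte bit positions for cache control (4 KiB pages). */
      #define _PAGE_BIT_PWT  3        /* page write-through */
      #define _PAGE_BIT_PCD  4        /* page cache disable */
      #define _PAGE_BIT_PAT  7        /* PAT index, high bit */

      /* Hypothetical helper: which of the 8 PAT MSR entries a pte selects. */
      static unsigned int pte_pat_index(uint64_t pte)
      {
              unsigned int idx = 0;

              idx |= (pte >> _PAGE_BIT_PWT) & 1;            /* index bit 0 */
              idx |= ((pte >> _PAGE_BIT_PCD) & 1) << 1;     /* index bit 1 */
              idx |= ((pte >> _PAGE_BIT_PAT) & 1) << 2;     /* index bit 2 */
              return idx;
      }

      int main(void)
      {
              /* A pte with only PCD set selects PAT entry 2. */
              printf("%u\n", pte_pat_index((uint64_t)1 << _PAGE_BIT_PCD));
              return 0;
      }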

     The Linux kernel currently supports only 4 different cache modes.
     The PAT MSR is set up in a way that the setting of _PAGE_PAT in a
     pte doesn't matter: the top 4 entries in the PAT MSR are the same
     as the 4 lower entries.
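
     (A standalone sketch of that mirrored setup, using the
     memory-type encodings from the Intel SDM; the macro names here
     are illustrative, and the kernel's actual MSR programming lives
     in arch/x86/mm/pat.c.)

      #include <stdint.h>
      #include <stdio.h>

      /* Memory-type encodings for one 8-bit PAT MSR entry (Intel SDM). */
      #define MT_UC        0x00      /* uncacheable */
      #define MT_WC        0x01      /* write-combining */
      #define MT_WT        0x04      /* write-through */
      #define MT_WP        0x05      /* write-protected */
      #define MT_WB        0x06      /* write-back */
      #define MT_UC_MINUS  0x07      /* UC-; MTRRs may still map it to WC */

      /* Place memory type 't' in PAT entry 'i'; each entry is 8 bits wide. */
      #define PAT_ENTRY(i, t)  ((uint64_t)(t) << ((i) * 8))

      int main(void)
      {
              /*
               * Entries 4-7 repeat entries 0-3, so the _PAGE_PAT bit in a
               * pte makes no difference and only WB, WC, UC- and UC are
               * reachable.
               */
              uint64_t pat = PAT_ENTRY(0, MT_WB) | PAT_ENTRY(1, MT_WC) |
                             PAT_ENTRY(2, MT_UC_MINUS) | PAT_ENTRY(3, MT_UC) |
                             PAT_ENTRY(4, MT_WB) | PAT_ENTRY(5, MT_WC) |
                             PAT_ENTRY(6, MT_UC_MINUS) | PAT_ENTRY(7, MT_UC);

              printf("PAT MSR = %#018llx\n", (unsigned long long)pat);
              return 0;
      }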

     As a result, the kernel does not support e.g. write-through mode.
     That cache mode in particular would speed up drivers for video
     cards, which currently have to use uncached accesses.

     On the other hand, some old processors (Pentium) don't implement
     PAT correctly, and the Xen hypervisor has been using a different
     PAT MSR configuration for some time now and can't change it, as
     this setting is part of its ABI.

     This patch set abstracts the cache mode from the pte and introduces
     tables to translate between cache mode and pte bits (the default
     cache mode "write back" is hard-wired to PAT entry 0).  The tables
     are statically initialized with values compatible with old
     processors and current usage.  As soon as the PAT MSR is changed
     (or - in case of Xen - is read at boot time) the tables are updated
     accordingly.  Requests for mappings with special cache modes are
     always possible now; if a mode is not supported, there is a
     fallback to a compatible but slower mode.
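
     (A rough, self-contained sketch of what such a translation
     table can look like; names and initial values below are
     illustrative rather than a copy of the kernel's tables, which
     are rewritten once the effective PAT MSR value is known.)

      #include <stdio.h>

      /* Simplified stand-ins for the x86 pte cache-control bits. */
      #define _PAGE_PWT  (1u << 3)
      #define _PAGE_PCD  (1u << 4)

      /* The abstract cache modes a mapping may request. */
      enum page_cache_mode {
              _PAGE_CACHE_MODE_WB = 0,
              _PAGE_CACHE_MODE_WC,
              _PAGE_CACHE_MODE_UC_MINUS,
              _PAGE_CACHE_MODE_UC,
              _PAGE_CACHE_MODE_WT,
              _PAGE_CACHE_MODE_WP,
              _PAGE_CACHE_MODE_NUM = 8
      };

      /*
       * cache mode -> pte bits.  Statically initialized with values
       * compatible with old processors and the mirrored PAT layout:
       * WT and WP fall back to the slower but compatible UC- encoding
       * until the table is rewritten from the effective PAT MSR value.
       */
      static unsigned int cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
              [_PAGE_CACHE_MODE_WB]       = 0,
              [_PAGE_CACHE_MODE_WC]       = _PAGE_PWT,
              [_PAGE_CACHE_MODE_UC_MINUS] = _PAGE_PCD,
              [_PAGE_CACHE_MODE_UC]       = _PAGE_PCD | _PAGE_PWT,
              [_PAGE_CACHE_MODE_WT]       = _PAGE_PCD,      /* fallback */
              [_PAGE_CACHE_MODE_WP]       = _PAGE_PCD,      /* fallback */
      };

      int main(void)
      {
              /* A WT request maps to the UC- pte encoding in this table. */
              printf("WT -> pte bits %#x\n",
                     cachemode2pte_tbl[_PAGE_CACHE_MODE_WT]);
              return 0;
      }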

     Summing it up, this patch set adds the following features:

      - capability to support WT and WP cache modes on processors with
        full PAT support

      - processors with no or incorrect PAT support still work as they
        do today, even if the WT or WP cache mode is selected by drivers
        for some pages

      - reduction of Xen special handling regarding cache mode

  Another change is a boot speedup on ridiculously large RAM systems,
  plus other smaller fixes"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (22 commits)
  x86: mm: Move PAT only functions to mm/pat.c
  xen: Support Xen pv-domains using PAT
  x86: Enable PAT to use cache mode translation tables
  x86: Respect PAT bit when copying pte values between large and normal pages
  x86: Support PAT bit in pagetable dump for lower levels
  x86: Clean up pgtable_types.h
  x86: Use new cache mode type in memtype related functions
  x86: Use new cache mode type in mm/ioremap.c
  x86: Use new cache mode type in setting page attributes
  x86: Remove looking for setting of _PAGE_PAT_LARGE in pageattr.c
  x86: Use new cache mode type in track_pfn_remap() and track_pfn_insert()
  x86: Use new cache mode type in mm/iomap_32.c
  x86: Use new cache mode type in asm/pgtable.h
  x86: Use new cache mode type in arch/x86/mm/init_64.c
  x86: Use new cache mode type in arch/x86/pci
  x86: Use new cache mode type in drivers/video/fbdev/vermilion
  x86: Use new cache mode type in drivers/video/fbdev/gbefb.c
  x86: Use new cache mode type in include/asm/fb.h
  x86: Make page cache mode a real type
  x86: mm: Use 2GB memory block size on large-memory x86-64 systems
  ...
parents 773fed91 0dbcae88
+0 −59
@@ -5,65 +5,6 @@
#include <asm-generic/cacheflush.h>
#include <asm/special_insns.h>

#ifdef CONFIG_X86_PAT
/*
 * X86 PAT uses page flags WC and Uncached together to keep track of
 * memory type of pages that have backing page struct. X86 PAT supports 3
 * different memory types, _PAGE_CACHE_WB, _PAGE_CACHE_WC and
 * _PAGE_CACHE_UC_MINUS and fourth state where page's memory type has not
 * been changed from its default (value of -1 used to denote this).
 * Note we do not support _PAGE_CACHE_UC here.
 */

#define _PGMT_DEFAULT		0
#define _PGMT_WC		(1UL << PG_arch_1)
#define _PGMT_UC_MINUS		(1UL << PG_uncached)
#define _PGMT_WB		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_MASK		(1UL << PG_uncached | 1UL << PG_arch_1)
#define _PGMT_CLEAR_MASK	(~_PGMT_MASK)

static inline unsigned long get_page_memtype(struct page *pg)
{
	unsigned long pg_flags = pg->flags & _PGMT_MASK;

	if (pg_flags == _PGMT_DEFAULT)
		return -1;
	else if (pg_flags == _PGMT_WC)
		return _PAGE_CACHE_WC;
	else if (pg_flags == _PGMT_UC_MINUS)
		return _PAGE_CACHE_UC_MINUS;
	else
		return _PAGE_CACHE_WB;
}

static inline void set_page_memtype(struct page *pg, unsigned long memtype)
{
	unsigned long memtype_flags = _PGMT_DEFAULT;
	unsigned long old_flags;
	unsigned long new_flags;

	switch (memtype) {
	case _PAGE_CACHE_WC:
		memtype_flags = _PGMT_WC;
		break;
	case _PAGE_CACHE_UC_MINUS:
		memtype_flags = _PGMT_UC_MINUS;
		break;
	case _PAGE_CACHE_WB:
		memtype_flags = _PGMT_WB;
		break;
	}

	do {
		old_flags = pg->flags;
		new_flags = (old_flags & _PGMT_CLEAR_MASK) | memtype_flags;
	} while (cmpxchg(&pg->flags, old_flags, new_flags) != old_flags);
}
#else
static inline unsigned long get_page_memtype(struct page *pg) { return -1; }
static inline void set_page_memtype(struct page *pg, unsigned long memtype) { }
#endif

/*
 * The set_memory_* API can be used to change various attributes of a virtual
 * address range. The attributes include:
+5 −1
@@ -8,8 +8,12 @@
static inline void fb_pgprotect(struct file *file, struct vm_area_struct *vma,
				unsigned long off)
{
+	unsigned long prot;
+
+	prot = pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK;
	if (boot_cpu_data.x86 > 3)
-		pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
+		pgprot_val(vma->vm_page_prot) =
+			prot | cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS);
}

extern int fb_is_primary_device(struct fb_info *info);
+0 −2
@@ -136,9 +136,7 @@ enum fixed_addresses {
extern void reserve_top_address(unsigned long reserve);

#define FIXADDR_SIZE	(__end_of_permanent_fixed_addresses << PAGE_SHIFT)
-#define FIXADDR_BOOT_SIZE	(__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START		(FIXADDR_TOP - FIXADDR_SIZE)
-#define FIXADDR_BOOT_START	(FIXADDR_TOP - FIXADDR_BOOT_SIZE)

extern int fixmaps_set;

+14 −11
@@ -38,17 +38,20 @@ extern unsigned long highstart_pfn, highend_pfn;
/*
 * Ordering is:
 *
 * FIXADDR_TOP
 * 			fixed_addresses
 * FIXADDR_START
 * 			temp fixed addresses
 * FIXADDR_BOOT_START
 * 			Persistent kmap area
 * PKMAP_BASE
 * VMALLOC_END
 * 			Vmalloc area
 * high memory on:			              high_memory off:
 *    FIXADDR_TOP                                        FIXADDR_TOP
 *        fixed addresses                                    fixed addresses
 *    FIXADDR_START                                      FIXADDR_START
 *        temp fixed addresses/persistent kmap area      VMALLOC_END
 *    PKMAP_BASE                                             temp fixed addresses/vmalloc area
 *    VMALLOC_END                                        VMALLOC_START
 *        vmalloc area                                   high_memory
 *    VMALLOC_START
 *    high_memory
 *
 * The temp fixed area is only used during boot for early_ioremap(), and
 * it is unused when the ioremap() is functional. vmalloc/pkmap area become
 * available after early boot so the temp fixed area is available for re-use.
 */
#define LAST_PKMAP_MASK (LAST_PKMAP-1)
#define PKMAP_NR(virt)  ((virt-PKMAP_BASE) >> PAGE_SHIFT)
+1 −1
@@ -318,7 +318,7 @@ extern void *xlate_dev_mem_ptr(phys_addr_t phys);
extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);

extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
-				unsigned long prot_val);
+				enum page_cache_mode pcm);
extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);

extern bool is_early_ioremap_ptep(pte_t *ptep);