Commit 8fb1e910 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge branch 'akpm' (patches from Andrew)

Merge misc fixes from Andrew Morton:
 "9 patches.

  Subsystems affected by this patch series: mm (thp, memcg, gup,
  migration, memory-hotplug), lib, and x86"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm: don't rely on system state to detect hot-plug operations
  mm: replace memmap_context by meminit_context
  arch/x86/lib/usercopy_64.c: fix __copy_user_flushcache() cache writeback
  lib/memregion.c: include memregion.h
  lib/string.c: implement stpcpy
  mm/migrate: correct thp migration stats
  mm/gup: fix gup_fast with dynamic page table folding
  mm: memcontrol: fix missing suffix of workingset_restore
  mm, THP, swap: fix allocating cluster for swapfile by mistake
parents ce268425 f85086f9
Loading
Loading
Loading
Loading
+18 −7
Original line number Diff line number Diff line
@@ -1324,15 +1324,26 @@ PAGE_SIZE multiple when read back.
	  pgmajfault
		Number of major page faults incurred

	  workingset_refault
		Number of refaults of previously evicted pages
	  workingset_refault_anon
		Number of refaults of previously evicted anonymous pages.

	  workingset_activate
		Number of refaulted pages that were immediately activated
	  workingset_refault_file
		Number of refaults of previously evicted file pages.

	  workingset_restore
		Number of restored pages which have been detected as an active
		workingset before they got reclaimed.
	  workingset_activate_anon
		Number of refaulted anonymous pages that were immediately
		activated.

	  workingset_activate_file
		Number of refaulted file pages that were immediately activated.

	  workingset_restore_anon
		Number of restored anonymous pages which have been detected as
		an active workingset before they got reclaimed.

	  workingset_restore_file
		Number of restored file pages which have been detected as an
		active workingset before they got reclaimed.

	  workingset_nodereclaim
		Number of times a shadow node has been reclaimed
+3 −3
Original line number Diff line number Diff line
@@ -538,7 +538,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg)
	if (map_start < map_end)
		memmap_init_zone((unsigned long)(map_end - map_start),
				 args->nid, args->zone, page_to_pfn(map_start),
				 MEMMAP_EARLY, NULL);
				 MEMINIT_EARLY, NULL);
	return 0;
}

@@ -547,8 +547,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone,
	     unsigned long start_pfn)
{
	if (!vmem_map) {
		memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY,
				NULL);
		memmap_init_zone(size, nid, zone, start_pfn,
				 MEMINIT_EARLY, NULL);
	} else {
		struct page *start;
		struct memmap_init_callback_data args;
+30 −12
Original line number Diff line number Diff line
@@ -1260,26 +1260,44 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address)

#define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address)

/*
 * NOTE(review): this span is a flattened unified diff — the first
 * signature line and the first three body statements are the PRE-patch
 * p4d_offset(); the remaining lines are the new p4d_offset_lockless().
 * Only one version can be live in the real header; do not compile as-is.
 */
static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
/*
 * New variant: works on a pgd entry VALUE (@pgd) the caller read once,
 * plus the pointer (@pgdp) it was read from, so a lockless walker
 * (per this merge's "mm/gup: fix gup_fast with dynamic page table
 * folding") is not confused by a concurrent fold/unfold of the level.
 */
static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address)
{
	/* stale pre-patch body (re-dereferences *pgd): */
	if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(*pgd) + p4d_index(address);
	return (p4d_t *) pgd;
	/* new body (uses the pre-read entry value only): */
	if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1)
		return (p4d_t *) pgd_deref(pgd) + p4d_index(address);
	/* level folded: the pgd slot pointer itself serves as the p4d pointer */
	return (p4d_t *) pgdp;
}
#define p4d_offset_lockless p4d_offset_lockless

/*
 * NOTE(review): flattened diff again — the first signature and the
 * if/return triple are the PRE-patch pud_offset(); the live post-patch
 * p4d_offset() body is only the final delegation line.
 */
static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
/* Post-patch p4d_offset(): read *pgdp once, delegate to the lockless helper. */
static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address)
{
	/* stale pre-patch pud_offset() body (references p4d, not a parameter here): */
	if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(*p4d) + pud_index(address);
	return (pud_t *) p4d;
	return p4d_offset_lockless(pgdp, *pgdp, address);
}

/*
 * pud_offset_lockless() - locate the pud entry for @address from a p4d
 * entry VALUE (@p4d) read once by the caller, plus the pointer (@p4dp)
 * it was read from.  Working on the pre-read value keeps lockless
 * walkers (gup_fast, per this merge's s390 fix) consistent under
 * concurrent dynamic page-table folding.
 */
static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address)
{
	/* entry is at least region-second type: a real pud table hangs off it */
	if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2)
		return (pud_t *) p4d_deref(p4d) + pud_index(address);
	/* pud level folded: the p4d slot pointer doubles as the pud pointer */
	return (pud_t *) p4dp;
}
#define pud_offset_lockless pud_offset_lockless

/* Normal variant: read the entry from *p4dp and delegate to the lockless helper. */
static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address)
{
	return pud_offset_lockless(p4dp, *p4dp, address);
}
#define pud_offset pud_offset

static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
/*
 * pmd_offset_lockless() - same pattern one level down: locate the pmd
 * entry for @address from a pud entry VALUE (@pud) read once by the
 * caller, plus the pointer (@pudp) it came from.
 */
static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address)
{
	/* entry is at least region-third type: a real pmd table hangs off it */
	if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(pud) + pmd_index(address);
	/* pmd level folded: reuse the pud slot pointer as the pmd pointer */
	return (pmd_t *) pudp;
}
#define pmd_offset_lockless pmd_offset_lockless

static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address)
{
	/*
	 * NOTE(review): the three statements below are stale pre-patch
	 * pud_offset() code left behind by the diff flattening — they
	 * reference 'pud', which is not a parameter of this function.
	 * The live post-patch body is only the final delegation line.
	 */
	if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3)
		return (pmd_t *) pud_deref(*pud) + pmd_index(address);
	return (pmd_t *) pud;
	return pmd_offset_lockless(pudp, *pudp, address);
}
#define pmd_offset pmd_offset

+1 −1
Original line number Diff line number Diff line
@@ -120,7 +120,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size)
	 */
	if (size < 8) {
		if (!IS_ALIGNED(dest, 4) || size != 4)
			clean_cache_range(dst, 1);
			clean_cache_range(dst, size);
	} else {
		if (!IS_ALIGNED(dest, 8)) {
			dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+55 −30
Original line number Diff line number Diff line
@@ -761,14 +761,36 @@ static int __ref get_nid_for_pfn(unsigned long pfn)
	return pfn_to_nid(pfn);
}

/*
 * Create the two sysfs symlinks tying @mem_blk to node @nid:
 * node -> memory block, then memory block -> node.  Returns 0 on
 * success or the error from the first failing sysfs_create_link_nowarn().
 */
static int do_register_memory_block_under_node(int nid,
					       struct memory_block *mem_blk)
{
	int ret;

	/*
	 * If this memory block spans multiple nodes, we only indicate
	 * the last processed node.
	 */
	mem_blk->nid = nid;

	/* node_devices[nid] -> mem_blk forward link */
	ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
				       &mem_blk->dev.kobj,
				       kobject_name(&mem_blk->dev.kobj));
	if (ret)
		return ret;

	/* mem_blk -> node_devices[nid] back link */
	return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
				&node_devices[nid]->dev.kobj,
				kobject_name(&node_devices[nid]->dev.kobj));
}

/* register memory section under specified node if it spans that node */
/*
 * NOTE(review): flattened diff — both the PRE-patch name
 * (register_mem_sect_under_node) and the post-patch name appear below,
 * as do two declarations of 'nid', a stale SYSTEM_BOOTING guard, and an
 * embedded "@@" hunk header marking omitted diff context.  The live
 * post-patch function walks the block's pfns, matches page nodes against
 * @nid (boot-time only, since node ranges can interleave), and links the
 * block to the node on first match.
 */
static int register_mem_sect_under_node(struct memory_block *mem_blk,
static int register_mem_block_under_node_early(struct memory_block *mem_blk,
					       void *arg)
{
	unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE;
	unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr);
	unsigned long end_pfn = start_pfn + memory_block_pfns - 1;
	/* stale pre-patch declaration (kept 'ret'): */
	int ret, nid = *(int *)arg;
	int nid = *(int *)arg;
	unsigned long pfn;

	for (pfn = start_pfn; pfn <= end_pfn; pfn++) {
/* diff hunk header — lines between the two hunks are not shown here */
@@ -785,36 +807,31 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk,
		}

		/*
		 * We need to check if page belongs to nid only for the boot
		 * case, during hotplug we know that all pages in the memory
		 * block belong to the same node.
		 * We need to check if page belongs to nid only at the boot
		 * case because node's ranges can be interleaved.
		 */
		/* stale pre-patch guard (hotplug split into its own callback): */
		if (system_state == SYSTEM_BOOTING) {
		page_nid = get_nid_for_pfn(pfn);
		if (page_nid < 0)
			continue;
		if (page_nid != nid)
			continue;

		/* first pfn belonging to @nid found: create the sysfs links */
		return do_register_memory_block_under_node(nid, mem_blk);
	}
	/* mem section does not span the specified node */
	return 0;
}

/*
		 * If this memory block spans multiple nodes, we only indicate
		 * the last processed node.
 * During hotplug we know that all pages in the memory block belong to the same
 * node.
 */
		mem_blk->nid = nid;

		ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj,
					&mem_blk->dev.kobj,
					kobject_name(&mem_blk->dev.kobj));
		if (ret)
			return ret;
/*
 * Hotplug path: all pages in the memory block are known to belong to the
 * same node (see comment above), so link unconditionally — no pfn scan.
 */
static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk,
						 void *arg)
{
	int nid = *(int *)arg;

	/*
	 * NOTE(review): the indented lines below are stale pre-patch code
	 * left by the diff flattening (the tail of the old
	 * register_mem_sect_under_node()); the live body is only the final
	 * do_register_memory_block_under_node() call.
	 */
		return sysfs_create_link_nowarn(&mem_blk->dev.kobj,
				&node_devices[nid]->dev.kobj,
				kobject_name(&node_devices[nid]->dev.kobj));
	}
	/* mem section does not span the specified node */
	return 0;
	return do_register_memory_block_under_node(nid, mem_blk);
}

/*
@@ -832,11 +849,19 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk)
			  kobject_name(&node_devices[mem_blk->nid]->dev.kobj));
}

/*
 * NOTE(review): both the pre-patch and post-patch signatures appear below
 * (diff flattening); the live version takes an explicit meminit_context
 * instead of inferring boot-vs-hotplug from system_state (per this
 * merge's "mm: don't rely on system state to detect hot-plug operations").
 * It picks the matching callback and walks the memory blocks in
 * [start_pfn, end_pfn), linking each to node @nid in sysfs.
 */
int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn)
int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn,
		      enum meminit_context context)
{
	walk_memory_blocks_func_t func;

	if (context == MEMINIT_HOTPLUG)
		func = register_mem_block_under_node_hotplug;
	else
		func = register_mem_block_under_node_early;

	return walk_memory_blocks(PFN_PHYS(start_pfn),
				  PFN_PHYS(end_pfn - start_pfn), (void *)&nid,
				  /* stale pre-patch argument, then the live one: */
				  register_mem_sect_under_node,
				  func);
}

#ifdef CONFIG_HUGETLBFS
Loading