Commit 7ea62160 authored by Dan Williams, committed by Linus Torvalds

mm/sparsemem: prepare for sub-section ranges

Prepare the memory hot-{add,remove} paths for handling sub-section
ranges by plumbing the starting page frame and number of pages being
handled through arch_{add,remove}_memory() to
sparse_{add,remove}_one_section().

This is simply plumbing, small cleanups, and some identifier renames.
No intended functional changes.
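For a sense of scale: on x86-64 with 4 KiB pages, a SPARSEMEM section spans 128 MiB while a sub-section spans 2 MiB, so the nr_pages values being plumbed here look like the following (illustrative arithmetic only, not part of the patch; the EXAMPLE_ names are hypothetical):

    /* Illustrative granularities, assuming x86-64 with 4 KiB pages. */
    #define EXAMPLE_PAGES_PER_SECTION	(1UL << (27 - 12))	/* 32768 pages == 128 MiB */
    #define EXAMPLE_PAGES_PER_SUBSECTION	(1UL << (21 - 12))	/*   512 pages ==   2 MiB */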

Link: http://lkml.kernel.org/r/156092353780.979959.9713046515562743194.stgit@dwillia2-desk3.amr.corp.intel.com


Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Reviewed-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>	[ppc64]
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Logan Gunthorpe <logang@deltatee.com>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Jeff Moyer <jmoyer@redhat.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Toshi Kani <toshi.kani@hpe.com>
Cc: Wei Yang <richardw.yang@linux.intel.com>
Cc: Jason Gunthorpe <jgg@mellanox.com>
Cc: Christoph Hellwig <hch@lst.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 46d945ae
include/linux/memory_hotplug.h: +3 −2
@@ -346,9 +346,10 @@ extern int add_memory_resource(int nid, struct resource *resource);
 extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
 		unsigned long nr_pages, struct vmem_altmap *altmap);
 extern bool is_memblock_offlined(struct memory_block *mem);
-extern int sparse_add_one_section(int nid, unsigned long start_pfn,
-				  struct vmem_altmap *altmap);
+extern int sparse_add_section(int nid, unsigned long pfn,
+		unsigned long nr_pages, struct vmem_altmap *altmap);
 extern void sparse_remove_one_section(struct mem_section *ms,
+		unsigned long pfn, unsigned long nr_pages,
 		unsigned long map_offset, struct vmem_altmap *altmap);
 extern struct page *sparse_decode_mem_map(unsigned long coded_mem_map,
 					  unsigned long pnum);
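The renamed sparse_add_section() simply grows an nr_pages parameter. A minimal sketch of a caller, assuming a hypothetical helper (the real call site is __add_section() in mm/memory_hotplug.c below):

    /* Hypothetical: add one full, section-aligned span with no altmap. */
    static int example_add_one_section(int nid, unsigned long section_start_pfn)
    {
    	return sparse_add_section(nid, section_start_pfn,
    			PAGES_PER_SECTION, NULL);
    }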
mm/memory_hotplug.c: +71 −43
@@ -252,51 +252,84 @@ void __init register_page_bootmem_info_node(struct pglist_data *pgdat)
 }
 #endif /* CONFIG_HAVE_BOOTMEM_INFO_NODE */
 
-static int __meminit __add_section(int nid, unsigned long phys_start_pfn,
-				   struct vmem_altmap *altmap)
+static int __meminit __add_section(int nid, unsigned long pfn,
+		unsigned long nr_pages, struct vmem_altmap *altmap)
 {
 	int ret;
 
-	if (pfn_valid(phys_start_pfn))
+	if (pfn_valid(pfn))
 		return -EEXIST;
 
-	ret = sparse_add_one_section(nid, phys_start_pfn, altmap);
+	ret = sparse_add_section(nid, pfn, nr_pages, altmap);
 	return ret < 0 ? ret : 0;
 }
 
+static int check_pfn_span(unsigned long pfn, unsigned long nr_pages,
+		const char *reason)
+{
+	/*
+	 * Disallow all operations smaller than a sub-section and only
+	 * allow operations smaller than a section for
+	 * SPARSEMEM_VMEMMAP. Note that check_hotplug_memory_range()
+	 * enforces a larger memory_block_size_bytes() granularity for
+	 * memory that will be marked online, so this check should only
+	 * fire for direct arch_{add,remove}_memory() users outside of
+	 * add_memory_resource().
+	 */
+	unsigned long min_align;
+
+	if (IS_ENABLED(CONFIG_SPARSEMEM_VMEMMAP))
+		min_align = PAGES_PER_SUBSECTION;
+	else
+		min_align = PAGES_PER_SECTION;
+	if (!IS_ALIGNED(pfn, min_align)
+			|| !IS_ALIGNED(nr_pages, min_align)) {
+		WARN(1, "Misaligned __%s_pages start: %#lx end: #%lx\n",
+				reason, pfn, pfn + nr_pages - 1);
+		return -EINVAL;
+	}
+	return 0;
+}
+
 /*
  * Reasonably generic function for adding memory.  It is
  * expected that archs that support memory hotplug will
  * call this function after deciding the zone to which to
  * add the new pages.
  */
-int __ref __add_pages(int nid, unsigned long phys_start_pfn,
-		unsigned long nr_pages, struct mhp_restrictions *restrictions)
+int __ref __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,
+		struct mhp_restrictions *restrictions)
 {
 	unsigned long i;
-	int err = 0;
-	int start_sec, end_sec;
+	int start_sec, end_sec, err;
 	struct vmem_altmap *altmap = restrictions->altmap;
 
-	/* during initialize mem_map, align hot-added range to section */
-	start_sec = pfn_to_section_nr(phys_start_pfn);
-	end_sec = pfn_to_section_nr(phys_start_pfn + nr_pages - 1);
-
 	if (altmap) {
 		/*
 		 * Validate altmap is within bounds of the total request
 		 */
-		if (altmap->base_pfn != phys_start_pfn
+		if (altmap->base_pfn != pfn
 				|| vmem_altmap_offset(altmap) > nr_pages) {
 			pr_warn_once("memory add fail, invalid altmap\n");
-			err = -EINVAL;
-			goto out;
+			return -EINVAL;
 		}
 		altmap->alloc = 0;
 	}
 
+	err = check_pfn_span(pfn, nr_pages, "add");
+	if (err)
+		return err;
+
+	start_sec = pfn_to_section_nr(pfn);
+	end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
 	for (i = start_sec; i <= end_sec; i++) {
-		err = __add_section(nid, section_nr_to_pfn(i), altmap);
+		unsigned long pfns;
+
+		pfns = min(nr_pages, PAGES_PER_SECTION
+				- (pfn & ~PAGE_SECTION_MASK));
+		err = __add_section(nid, pfn, pfns, altmap);
+		pfn += pfns;
+		nr_pages -= pfns;
 
 		/*
 		 * EEXIST is finally dealt with by ioresource collision
@@ -309,7 +342,6 @@ int __ref __add_pages(int nid, unsigned long phys_start_pfn,
 		cond_resched();
 	}
 	vmemmap_populate_print_last();
-out:
 	return err;
 }
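The min() clamp in the loop above hands each __add_section() call only the pages that fall within the current section, so a request that straddles a section boundary is split transparently. A standalone userspace sketch of the same arithmetic, assuming the illustrative 32768-pages-per-section figure from earlier (the macro names here shadow the kernel's for demonstration only):

    /* Standalone demo of the per-section splitting; compile as plain C. */
    #include <stdio.h>

    #define PAGES_PER_SECTION	32768UL	/* assumed: x86-64, 4 KiB pages */
    #define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))
    #define MIN(a, b)		((a) < (b) ? (a) : (b))

    int main(void)
    {
    	unsigned long pfn = 32256, nr_pages = 1024; /* straddles a boundary */

    	while (nr_pages) {
    		/* Pages left in the section that contains pfn. */
    		unsigned long pfns = MIN(nr_pages,
    				PAGES_PER_SECTION - (pfn & ~PAGE_SECTION_MASK));

    		printf("add pfn=%lu pfns=%lu\n", pfn, pfns);
    		pfn += pfns;
    		nr_pages -= pfns;
    	}
    	return 0;	/* prints: pfn=32256 pfns=512, then pfn=32768 pfns=512 */
    }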

@@ -487,10 +519,10 @@ static void shrink_pgdat_span(struct pglist_data *pgdat,
 	pgdat->node_spanned_pages = 0;
 }
 
-static void __remove_zone(struct zone *zone, unsigned long start_pfn)
+static void __remove_zone(struct zone *zone, unsigned long start_pfn,
+		unsigned long nr_pages)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
-	int nr_pages = PAGES_PER_SECTION;
 	unsigned long flags;
 
 	pgdat_resize_lock(zone->zone_pgdat, &flags);
@@ -499,27 +531,23 @@ static void __remove_zone(struct zone *zone, unsigned long start_pfn)
 	pgdat_resize_unlock(zone->zone_pgdat, &flags);
 }
 
-static void __remove_section(struct zone *zone, struct mem_section *ms,
-			     unsigned long map_offset,
+static void __remove_section(struct zone *zone, unsigned long pfn,
+		unsigned long nr_pages, unsigned long map_offset,
 		struct vmem_altmap *altmap)
 {
-	unsigned long start_pfn;
-	int scn_nr;
+	struct mem_section *ms = __nr_to_section(pfn_to_section_nr(pfn));
 
 	if (WARN_ON_ONCE(!valid_section(ms)))
 		return;
 
-	scn_nr = __section_nr(ms);
-	start_pfn = section_nr_to_pfn((unsigned long)scn_nr);
-	__remove_zone(zone, start_pfn);
-
-	sparse_remove_one_section(ms, map_offset, altmap);
+	__remove_zone(zone, pfn, nr_pages);
+	sparse_remove_one_section(ms, pfn, nr_pages, map_offset, altmap);
 }
 
 /**
  * __remove_pages() - remove sections of pages from a zone
  * @zone: zone from which pages need to be removed
- * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
+ * @pfn: starting pageframe (must be aligned to start of a section)
  * @nr_pages: number of pages to remove (must be multiple of section size)
  * @altmap: alternative device page map or %NULL if default memmap is used
  *
@@ -528,30 +556,30 @@ static void __remove_section(struct zone *zone, struct mem_section *ms,
  * sure that pages are marked reserved and zones are adjust properly by
  * calling offline_pages().
  */
-void __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
+void __remove_pages(struct zone *zone, unsigned long pfn,
 		    unsigned long nr_pages, struct vmem_altmap *altmap)
 {
-	unsigned long i;
 	unsigned long map_offset = 0;
-	int sections_to_remove;
+	int i, start_sec, end_sec;
 
 	map_offset = vmem_altmap_offset(altmap);
 
 	clear_zone_contiguous(zone);
 
-	/*
-	 * We can only remove entire sections
-	 */
-	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
-	BUG_ON(nr_pages % PAGES_PER_SECTION);
+	if (check_pfn_span(pfn, nr_pages, "remove"))
+		return;
 
-	sections_to_remove = nr_pages / PAGES_PER_SECTION;
-	for (i = 0; i < sections_to_remove; i++) {
-		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
+	start_sec = pfn_to_section_nr(pfn);
+	end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
+	for (i = start_sec; i <= end_sec; i++) {
+		unsigned long pfns;
 
 		cond_resched();
-		__remove_section(zone, __pfn_to_section(pfn), map_offset,
-				 altmap);
+		pfns = min(nr_pages, PAGES_PER_SECTION
+				- (pfn & ~PAGE_SECTION_MASK));
+		__remove_section(zone, pfn, pfns, map_offset, altmap);
+		pfn += pfns;
+		nr_pages -= pfns;
 		map_offset = 0;
 	}
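On the remove path, the hard BUG_ON()s give way to the same check_pfn_span() gate used by __add_pages(). Under the assumed x86-64 sizes from earlier (PAGES_PER_SUBSECTION = 512 with CONFIG_SPARSEMEM_VMEMMAP, PAGES_PER_SECTION = 32768 without), the gate behaves roughly like this sketch:

    /*
     * Illustrative check_pfn_span() outcomes under the assumed sizes:
     *
     *   VMEMMAP:  check_pfn_span(512, 512, "remove") -> 0
     *             check_pfn_span(512, 256, "remove") -> -EINVAL (WARNs: below sub-section minimum)
     *             check_pfn_span(768, 512, "remove") -> -EINVAL (WARNs: misaligned start)
     *
     *   classic sparse: only whole, section-aligned spans pass, preserving
     *   the old BUG_ON() contract as a WARN plus early return.
     */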

mm/sparse.c: +7 −9
@@ -728,8 +728,8 @@ static void free_map_bootmem(struct page *memmap)
  * * -EEXIST	- Section has been present.
  * * -ENOMEM	- Out of memory.
  */
-int __meminit sparse_add_one_section(int nid, unsigned long start_pfn,
-				     struct vmem_altmap *altmap)
+int __meminit sparse_add_section(int nid, unsigned long start_pfn,
+		unsigned long nr_pages, struct vmem_altmap *altmap)
 {
 	unsigned long section_nr = pfn_to_section_nr(start_pfn);
 	struct mem_section_usage *usage;
@@ -835,7 +835,8 @@ static void free_section_usage(struct mem_section *ms, struct page *memmap,
 		free_map_bootmem(memmap);
 }
 
-void sparse_remove_one_section(struct mem_section *ms, unsigned long map_offset,
+void sparse_remove_one_section(struct mem_section *ms, unsigned long pfn,
+		unsigned long nr_pages, unsigned long map_offset,
 		struct vmem_altmap *altmap)
 {
 	struct page *memmap = NULL;
@@ -849,10 +850,7 @@ void sparse_remove_one_section(struct mem_section *ms, unsigned long map_offset,
 		ms->usage = NULL;
 	}
 
-	clear_hwpoisoned_pages(memmap + map_offset,
-			PAGES_PER_SECTION - map_offset);
-	free_section_usage(ms, memmap, usage,
-			section_nr_to_pfn(__section_nr(ms)),
-			PAGES_PER_SECTION, altmap);
+	clear_hwpoisoned_pages(memmap + map_offset, nr_pages - map_offset);
+	free_section_usage(ms, memmap, usage, pfn, nr_pages, altmap);
 }
 #endif /* CONFIG_MEMORY_HOTPLUG */
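One consequence worth noting: once sub-section removals arrive, the hwpoison sweep and usage free cover exactly the removed span instead of a hardcoded full section. Illustrative arithmetic under the assumed sizes from earlier (not part of the patch):

    /*
     * Removing one 2 MiB sub-section (nr_pages = 512, map_offset = 0):
     *   before: clear_hwpoisoned_pages(memmap, PAGES_PER_SECTION - 0) -> 32768 pages swept
     *   after:  clear_hwpoisoned_pages(memmap, nr_pages - 0)          ->   512 pages swept
     */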