Commit da024512 authored by Christoph Hellwig, committed by Dan Williams
Browse files

mm: pass the vmem_altmap to arch_remove_memory and __remove_pages



We can just pass this on instead of having to do a radix tree lookup
without proper locking two levels into the call chain.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 7b73d978
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -663,7 +663,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -671,7 +671,7 @@ int arch_remove_memory(u64 start, u64 size)
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
	if (ret)
		pr_warn("%s: Problem encountered in __remove_pages() as"
			" ret=%d\n", __func__,  ret);
+2 −4
Original line number Diff line number Diff line
@@ -149,11 +149,10 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct vmem_altmap *altmap;
	struct page *page;
	int ret;

@@ -162,11 +161,10 @@ int arch_remove_memory(u64 start, u64 size)
	 * when querying the zone.
	 */
	page = pfn_to_page(start_pfn);
	altmap = to_vmem_altmap((unsigned long) page);
	if (altmap)
		page += vmem_altmap_offset(altmap);

	ret = __remove_pages(page_zone(page), start_pfn, nr_pages);
	ret = __remove_pages(page_zone(page), start_pfn, nr_pages, altmap);
	if (ret)
		return ret;

+1 −1
Original line number Diff line number Diff line
@@ -240,7 +240,7 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	/*
	 * There is no hardware or firmware interface which could trigger a
+2 −2
Original line number Diff line number Diff line
@@ -510,7 +510,7 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = PFN_DOWN(start);
	unsigned long nr_pages = size >> PAGE_SHIFT;
@@ -518,7 +518,7 @@ int arch_remove_memory(u64 start, u64 size)
	int ret;

	zone = page_zone(pfn_to_page(start_pfn));
	ret = __remove_pages(zone, start_pfn, nr_pages);
	ret = __remove_pages(zone, start_pfn, nr_pages, altmap);
	if (unlikely(ret))
		pr_warn("%s: Failed, __remove_pages() == %d\n", __func__,
			ret);
+2 −2
Original line number Diff line number Diff line
@@ -839,14 +839,14 @@ int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap,
}

#ifdef CONFIG_MEMORY_HOTREMOVE
int arch_remove_memory(u64 start, u64 size)
int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone;

	zone = page_zone(pfn_to_page(start_pfn));
	return __remove_pages(zone, start_pfn, nr_pages);
	return __remove_pages(zone, start_pfn, nr_pages, altmap);
}
#endif
#endif
Loading