Commit feec24a6 authored by Naoya Horiguchi, committed by Linus Torvalds
Browse files

mm, soft-offline: convert parameter to pfn

Currently soft_offline_page() receives struct page, and its sibling
memory_failure() receives pfn.  This discrepancy looks weird and makes
precheck on pfn validity tricky.  So let's align them.

Link: http://lkml.kernel.org/r/20191016234706.GA5493@www9186uo.sakura.ne.jp


Signed-off-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Acked-by: Andrew Morton <akpm@linux-foundation.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 996ff7a0
Loading
Loading
Loading
Loading
+1 −6
Original line number Diff line number Diff line
@@ -538,12 +538,7 @@ static ssize_t soft_offline_page_store(struct device *dev,
	if (kstrtoull(buf, 0, &pfn) < 0)
		return -EINVAL;
	pfn >>= PAGE_SHIFT;
	if (!pfn_valid(pfn))
		return -ENXIO;
	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
	if (!pfn_to_online_page(pfn))
		return -EIO;
	ret = soft_offline_page(pfn_to_page(pfn), 0);
	ret = soft_offline_page(pfn, 0);
	return ret == 0 ? count : ret;
}

+1 −1
Original line number Diff line number Diff line
@@ -2773,7 +2773,7 @@ extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages __read_mostly;
extern int soft_offline_page(struct page *page, int flags);
extern int soft_offline_page(unsigned long pfn, int flags);


/*
+1 −1
Original line number Diff line number Diff line
@@ -895,7 +895,7 @@ static int madvise_inject_error(int behavior,
			pr_info("Soft offlining pfn %#lx at process virtual address %#lx\n",
					pfn, start);

			ret = soft_offline_page(page, MF_COUNT_INCREASED);
			ret = soft_offline_page(pfn, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
+9 −10
Original line number Diff line number Diff line
@@ -1476,7 +1476,7 @@ static void memory_failure_work_func(struct work_struct *work)
		if (!gotten)
			break;
		if (entry.flags & MF_SOFT_OFFLINE)
			soft_offline_page(pfn_to_page(entry.pfn), entry.flags);
			soft_offline_page(entry.pfn, entry.flags);
		else
			memory_failure(entry.pfn, entry.flags);
	}
@@ -1857,7 +1857,7 @@ static int soft_offline_free_page(struct page *page)

/**
 * soft_offline_page - Soft offline a page.
 * @page: page to offline
 * @pfn: pfn to soft-offline
 * @flags: flags. Same as memory_failure().
 *
 * Returns 0 on success, otherwise negated errno.
@@ -1877,18 +1877,17 @@ static int soft_offline_free_page(struct page *page)
 * This is not a 100% solution for all memory, but tries to be
 * ``good enough'' for the majority of memory.
 */
int soft_offline_page(struct page *page, int flags)
int soft_offline_page(unsigned long pfn, int flags)
{
	int ret;
	unsigned long pfn = page_to_pfn(page);
	struct page *page;

	if (is_zone_device_page(page)) {
		pr_debug_ratelimited("soft_offline: %#lx page is device page\n",
				pfn);
		if (flags & MF_COUNT_INCREASED)
			put_page(page);
	if (!pfn_valid(pfn))
		return -ENXIO;
	/* Only online pages can be soft-offlined (esp., not ZONE_DEVICE). */
	page = pfn_to_online_page(pfn);
	if (!page)
		return -EIO;
	}

	if (PageHWPoison(page)) {
		pr_info("soft offline: %#lx page already poisoned\n", pfn);