Commit 396bcc52 authored by Matthew Wilcox (Oracle), committed by Linus Torvalds

mm: remove CONFIG_TRANSPARENT_HUGE_PAGECACHE

Commit e496cf3d ("thp: introduce CONFIG_TRANSPARENT_HUGE_PAGECACHE")
notes that it should be reverted once the PowerPC problem was fixed.
The commit that fixed the PowerPC problem (953c66c2) did not revert
e496cf3d; instead, it set CONFIG_TRANSPARENT_HUGE_PAGECACHE to the
same value as CONFIG_TRANSPARENT_HUGEPAGE.  Checking with Kirill and
Aneesh confirmed that this was an oversight, so remove the Kconfig
symbol and undo the work of commit e496cf3d.
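
To see why the symbol carried no information of its own, here is a
minimal stand-alone sketch (illustrative only: the CONFIG_* defines
and the simplified IS_ENABLED() below are hand-stubbed imitations of
what Kconfig and <linux/kconfig.h> generate).  Once the symbol is a
def_bool alias, a guard on it can never disagree with a guard on
CONFIG_TRANSPARENT_HUGEPAGE:

#include <stdio.h>

#define CONFIG_TRANSPARENT_HUGEPAGE 1	/* as if THP=y */
/* def_bool y, depends on TRANSPARENT_HUGEPAGE: always tracks THP */
#define CONFIG_TRANSPARENT_HUGE_PAGECACHE 1

/* Simplified stand-in for the kernel's IS_ENABLED() machinery */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define __is_defined(x) ___is_defined(x)
#define IS_ENABLED(option) __is_defined(option)

int main(void)
{
	/* Prints "THP=1 PAGECACHE=1"; the two can only move together. */
	printf("THP=%d PAGECACHE=%d\n",
	       IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE),
	       IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE));
	return 0;
}

Because the alias can only follow the symbol it depends on, every
IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE) test in the diffs below
can be dropped or rewritten against CONFIG_TRANSPARENT_HUGEPAGE
without changing behaviour.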

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Cc: Christoph Hellwig <hch@lst.de>
Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
Link: http://lkml.kernel.org/r/20200318140253.6141-6-willy@infradead.org
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent a0650604
include/linux/shmem_fs.h  +1 −9
@@ -78,6 +78,7 @@ extern void shmem_truncate_range(struct inode *inode, loff_t start, loff_t end);
 extern int shmem_unuse(unsigned int type, bool frontswap,
 		       unsigned long *fs_pages_to_unuse);
 
+extern bool shmem_huge_enabled(struct vm_area_struct *vma);
 extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
 extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 						pgoff_t start, pgoff_t end);
@@ -114,15 +115,6 @@ static inline bool shmem_file(struct file *file)
 extern bool shmem_charge(struct inode *inode, long pages);
 extern void shmem_uncharge(struct inode *inode, long pages);
 
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
-extern bool shmem_huge_enabled(struct vm_area_struct *vma);
-#else
-static inline bool shmem_huge_enabled(struct vm_area_struct *vma)
-{
-	return false;
-}
-#endif
-
 #ifdef CONFIG_SHMEM
 extern int shmem_mcopy_atomic_pte(struct mm_struct *dst_mm, pmd_t *dst_pmd,
 				  struct vm_area_struct *dst_vma,
mm/Kconfig  +1 −5
@@ -420,10 +420,6 @@ config THP_SWAP
 
 	  For selection by architectures with reasonable THP sizes.
 
-config	TRANSPARENT_HUGE_PAGECACHE
-	def_bool y
-	depends on TRANSPARENT_HUGEPAGE
-
 #
 # UP and nommu archs use km based percpu allocator
 #
@@ -714,7 +710,7 @@ config GUP_GET_PTE_LOW_HIGH
 
 config READ_ONLY_THP_FOR_FS
 	bool "Read-only THP for filesystems (EXPERIMENTAL)"
-	depends on TRANSPARENT_HUGE_PAGECACHE && SHMEM
+	depends on TRANSPARENT_HUGEPAGE && SHMEM
 
 	help
 	  Allow khugepaged to put read-only file-backed pages in THP.
mm/huge_memory.c  +1 −1
@@ -326,7 +326,7 @@ static struct attribute *hugepage_attr[] = {
 	&defrag_attr.attr,
 	&use_zero_page_attr.attr,
 	&hpage_pmd_size_attr.attr,
-#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
+#ifdef CONFIG_SHMEM
 	&shmem_enabled_attr.attr,
 #endif
 #ifdef CONFIG_DEBUG_VM
mm/khugepaged.c  +4 −8
@@ -414,8 +414,6 @@ static bool hugepage_vma_check(struct vm_area_struct *vma,
 	    (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
 	     vma->vm_file &&
 	     (vm_flags & VM_DENYWRITE))) {
-		if (!IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE))
-			return false;
 		return IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
 				HPAGE_PMD_NR);
 	}
@@ -1258,7 +1256,7 @@ static void collect_mm_slot(struct mm_slot *mm_slot)
 	}
 }
 
-#if defined(CONFIG_SHMEM) && defined(CONFIG_TRANSPARENT_HUGE_PAGECACHE)
+#ifdef CONFIG_SHMEM
 /*
  * Notify khugepaged that given addr of the mm is pte-mapped THP. Then
  * khugepaged should try to collapse the page table.
@@ -1973,6 +1971,8 @@ skip:
 		if (khugepaged_scan.address < hstart)
 			khugepaged_scan.address = hstart;
 		VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
+		if (shmem_file(vma->vm_file) && !shmem_huge_enabled(vma))
+			goto skip;
 
 		while (khugepaged_scan.address < hend) {
 			int ret;
@@ -1984,14 +1984,10 @@ skip:
 				  khugepaged_scan.address + HPAGE_PMD_SIZE >
 				  hend);
 			if (IS_ENABLED(CONFIG_SHMEM) && vma->vm_file) {
-				struct file *file;
+				struct file *file = get_file(vma->vm_file);
 				pgoff_t pgoff = linear_page_index(vma,
 						khugepaged_scan.address);
 
-				if (shmem_file(vma->vm_file)
-				    && !shmem_huge_enabled(vma))
-					goto skip;
-				file = get_file(vma->vm_file);
 				up_read(&mm->mmap_sem);
 				ret = 1;
 				khugepaged_scan_file(mm, file, pgoff, hpage);
mm/memory.c  +2 −3
@@ -3373,7 +3373,7 @@ map_pte:
 	return 0;
 }
 
-#ifdef CONFIG_TRANSPARENT_HUGE_PAGECACHE
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 static void deposit_prealloc_pte(struct vm_fault *vmf)
 {
 	struct vm_area_struct *vma = vmf->vma;
@@ -3475,8 +3475,7 @@ vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 	pte_t entry;
 	vm_fault_t ret;
 
-	if (pmd_none(*vmf->pmd) && PageTransCompound(page) &&
-			IS_ENABLED(CONFIG_TRANSPARENT_HUGE_PAGECACHE)) {
+	if (pmd_none(*vmf->pmd) && PageTransCompound(page)) {
 		/* THP on COW? */
 		VM_BUG_ON_PAGE(memcg, page);