Commit 5ddcee1f authored by Gao Xiang
Browse files

erofs: get rid of __stagingpage_alloc helper

Now open code is much cleaner due to iterative development.

Link: https://lore.kernel.org/r/20191124025217.12345-1-hsiangkao@aol.com


Reviewed-by: Chao Yu <yuchao0@huawei.com>
Signed-off-by: Gao Xiang <gaoxiang25@huawei.com>
parent bda17a45
Loading
Loading
Loading
Loading
+1 −1
Original line number Diff line number Diff line
@@ -73,7 +73,7 @@ static int z_erofs_lz4_prepare_destpages(struct z_erofs_decompress_req *rq,
			victim = availables[--top];
			get_page(victim);
		} else {
			victim = erofs_allocpage(pagepool, GFP_KERNEL, false);
			victim = erofs_allocpage(pagepool, GFP_KERNEL);
			if (!victim)
				return -ENOMEM;
			victim->mapping = Z_EROFS_MAPPING_STAGING;
+1 −1
Original line number Diff line number Diff line
@@ -382,7 +382,7 @@ int erofs_namei(struct inode *dir, struct qstr *name,
extern const struct file_operations erofs_dir_fops;

/* utils.c / zdata.c */
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail);
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp);

#if (EROFS_PCPUBUF_NR_PAGES > 0)
void *erofs_get_pcpubuf(unsigned int pagenr);
+2 −2
Original line number Diff line number Diff line
@@ -7,7 +7,7 @@
#include "internal.h"
#include <linux/pagevec.h>

struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp)
{
	struct page *page;

@@ -16,7 +16,7 @@ struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail)
		DBG_BUGON(page_ref_count(page) != 1);
		list_del(&page->lru);
	} else {
		page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0);
		page = alloc_page(gfp);
	}
	return page;
}
+17 −20
Original line number Diff line number Diff line
@@ -546,15 +546,6 @@ static bool z_erofs_collector_end(struct z_erofs_collector *clt)
	return true;
}

/*
 * Allocate a staging page, taking it from the local @pagepool free list
 * when one is available and falling back to the page allocator otherwise.
 * erofs_allocpage() is called with nofail=true, which ORs __GFP_NOFAIL
 * into @gfp, so the allocation cannot return NULL and no error check is
 * needed before dereferencing @page below.
 */
static inline struct page *__stagingpage_alloc(struct list_head *pagepool,
					       gfp_t gfp)
{
	struct page *page = erofs_allocpage(pagepool, gfp, true);

	/* NOTE(review): presumably tags the page as temporary (non-file-backed)
	 * staging storage for decompression — confirm against the
	 * Z_EROFS_MAPPING_STAGING definition. */
	page->mapping = Z_EROFS_MAPPING_STAGING;
	return page;
}

static bool should_alloc_managed_pages(struct z_erofs_decompress_frontend *fe,
				       unsigned int cachestrategy,
				       erofs_off_t la)
@@ -661,8 +652,9 @@ retry:
	/* should allocate an additional staging page for pagevec */
	if (err == -EAGAIN) {
		struct page *const newpage =
			__stagingpage_alloc(pagepool, GFP_NOFS);
			erofs_allocpage(pagepool, GFP_NOFS | __GFP_NOFAIL);

		newpage->mapping = Z_EROFS_MAPPING_STAGING;
		err = z_erofs_attach_page(clt, newpage,
					  Z_EROFS_PAGE_TYPE_EXCLUSIVE);
		if (!err)
@@ -1079,19 +1071,24 @@ repeat:
	unlock_page(page);
	put_page(page);
out_allocpage:
	page = __stagingpage_alloc(pagepool, gfp);
	page = erofs_allocpage(pagepool, gfp | __GFP_NOFAIL);
	if (!tocache || add_to_page_cache_lru(page, mc, index + nr, gfp)) {
		/* non-LRU / non-movable temporary page is needed */
		page->mapping = Z_EROFS_MAPPING_STAGING;
		tocache = false;
	}

	if (oldpage != cmpxchg(&pcl->compressed_pages[nr], oldpage, page)) {
		if (tocache) {
			/* since it added to managed cache successfully */
			unlock_page(page);
			put_page(page);
		} else {
			list_add(&page->lru, pagepool);
		cpu_relax();
		goto repeat;
		}
	if (!tocache)
		goto out;
	if (add_to_page_cache_lru(page, mc, index + nr, gfp)) {
		page->mapping = Z_EROFS_MAPPING_STAGING;
		goto out;
		cond_resched();
		goto repeat;
	}

	set_page_private(page, (unsigned long)pcl);
	SetPagePrivate(page);
out:	/* the only exit (for tracing and debugging) */