Commit 3893c202 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull erofs updates from Gao Xiang:
 "A regression fix and several cleanups; an upcoming mount API
  conversion patch may also land this cycle as part of the VFS
  update.

  All commits have been in linux-next and tested with no issues observed.

  Summary:

   - fix an out-of-bound read access introduced in v5.3, which could
     rarely cause data corruption

   - various cleanup patches"

* tag 'erofs-for-5.6-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/xiang/erofs:
  erofs: clean up z_erofs_submit_queue()
  erofs: fold in postsubmit_is_all_bypassed()
  erofs: fix out-of-bound read for shifted uncompressed block
  erofs: remove void tagging/untagging of workgroup pointers
  erofs: remove unused tag argument while registering a workgroup
  erofs: remove unused tag argument while finding a workgroup
  erofs: correct indentation of an assigned structure inside a function
parents 53070406 1e4a2955
Loading
Loading
Loading
Loading
+10 −12
Original line number Diff line number Diff line
@@ -306,24 +306,22 @@ static int z_erofs_shifted_transform(const struct z_erofs_decompress_req *rq,
	}

	src = kmap_atomic(*rq->in);
	if (!rq->out[0]) {
		dst = NULL;
	} else {
	if (rq->out[0]) {
		dst = kmap_atomic(rq->out[0]);
		memcpy(dst + rq->pageofs_out, src, righthalf);
		kunmap_atomic(dst);
	}

	if (nrpages_out == 2) {
		DBG_BUGON(!rq->out[1]);
		if (rq->out[1] == *rq->in) {
			memmove(src, src + righthalf, rq->pageofs_out);
	} else if (nrpages_out == 2) {
		if (dst)
			kunmap_atomic(dst);
		DBG_BUGON(!rq->out[1]);
		} else {
			dst = kmap_atomic(rq->out[1]);
			memcpy(dst, src + righthalf, rq->pageofs_out);
	}
	if (dst)
			kunmap_atomic(dst);
		}
	}
	kunmap_atomic(src);
	return 0;
}
+2 −2
Original line number Diff line number Diff line
@@ -401,9 +401,9 @@ static inline void *erofs_get_pcpubuf(unsigned int pagenr)
#ifdef CONFIG_EROFS_FS_ZIP
int erofs_workgroup_put(struct erofs_workgroup *grp);
struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index, bool *tag);
					     pgoff_t index);
int erofs_register_workgroup(struct super_block *sb,
			     struct erofs_workgroup *grp, bool tag);
			     struct erofs_workgroup *grp);
void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
void erofs_shrinker_register(struct super_block *sb);
void erofs_shrinker_unregister(struct super_block *sb);
+4 −11
Original line number Diff line number Diff line
@@ -59,7 +59,7 @@ repeat:
}

struct erofs_workgroup *erofs_find_workgroup(struct super_block *sb,
					     pgoff_t index, bool *tag)
					     pgoff_t index)
{
	struct erofs_sb_info *sbi = EROFS_SB(sb);
	struct erofs_workgroup *grp;
@@ -68,9 +68,6 @@ repeat:
	rcu_read_lock();
	grp = radix_tree_lookup(&sbi->workstn_tree, index);
	if (grp) {
		*tag = xa_pointer_tag(grp);
		grp = xa_untag_pointer(grp);

		if (erofs_workgroup_get(grp)) {
			/* prefer to relax rcu read side */
			rcu_read_unlock();
@@ -84,8 +81,7 @@ repeat:
}

int erofs_register_workgroup(struct super_block *sb,
			     struct erofs_workgroup *grp,
			     bool tag)
			     struct erofs_workgroup *grp)
{
	struct erofs_sb_info *sbi;
	int err;
@@ -103,8 +99,6 @@ int erofs_register_workgroup(struct super_block *sb,
	sbi = EROFS_SB(sb);
	xa_lock(&sbi->workstn_tree);

	grp = xa_tag_pointer(grp, tag);

	/*
	 * Bump up reference count before making this workgroup
	 * visible to other users in order to avoid potential UAF
@@ -175,8 +169,7 @@ static bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
	 * however in order to avoid some race conditions, add a
	 * DBG_BUGON to observe this in advance.
	 */
	DBG_BUGON(xa_untag_pointer(radix_tree_delete(&sbi->workstn_tree,
						     grp->index)) != grp);
	DBG_BUGON(radix_tree_delete(&sbi->workstn_tree, grp->index) != grp);

	/*
	 * If managed cache is on, last refcount should indicate
@@ -201,7 +194,7 @@ repeat:
				       batch, first_index, PAGEVEC_SIZE);

	for (i = 0; i < found; ++i) {
		struct erofs_workgroup *grp = xa_untag_pointer(batch[i]);
		struct erofs_workgroup *grp = batch[i];

		first_index = grp->index + 1;

+9 −8
Original line number Diff line number Diff line
@@ -49,7 +49,8 @@ static inline const struct xattr_handler *erofs_xattr_handler(unsigned int idx)
	static const struct xattr_handler *xattr_handler_map[] = {
		[EROFS_XATTR_INDEX_USER] = &erofs_xattr_user_handler,
#ifdef CONFIG_EROFS_FS_POSIX_ACL
	[EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] = &posix_acl_access_xattr_handler,
		[EROFS_XATTR_INDEX_POSIX_ACL_ACCESS] =
			&posix_acl_access_xattr_handler,
		[EROFS_XATTR_INDEX_POSIX_ACL_DEFAULT] =
			&posix_acl_default_xattr_handler,
#endif
+49 −74
Original line number Diff line number Diff line
@@ -345,9 +345,8 @@ static int z_erofs_lookup_collection(struct z_erofs_collector *clt,
	struct z_erofs_pcluster *pcl;
	struct z_erofs_collection *cl;
	unsigned int length;
	bool tag;

	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT, &tag);
	grp = erofs_find_workgroup(inode->i_sb, map->m_pa >> PAGE_SHIFT);
	if (!grp)
		return -ENOENT;

@@ -438,7 +437,7 @@ static int z_erofs_register_collection(struct z_erofs_collector *clt,
	 */
	mutex_trylock(&cl->lock);

	err = erofs_register_workgroup(inode->i_sb, &pcl->obj, 0);
	err = erofs_register_workgroup(inode->i_sb, &pcl->obj);
	if (err) {
		mutex_unlock(&cl->lock);
		kmem_cache_free(pcluster_cachep, pcl);
@@ -1149,21 +1148,7 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl,
	qtail[JQ_BYPASS] = &pcl->next;
}

static bool postsubmit_is_all_bypassed(struct z_erofs_decompressqueue *q[],
				       unsigned int nr_bios, bool force_fg)
{
	/*
	 * although background is preferred, no one is pending for submission.
	 * don't issue workqueue for decompression but drop it directly instead.
	 */
	if (force_fg || nr_bios)
		return false;

	kvfree(q[JQ_SUBMIT]);
	return true;
}

static bool z_erofs_submit_queue(struct super_block *sb,
static void z_erofs_submit_queue(struct super_block *sb,
				 z_erofs_next_pcluster_t owned_head,
				 struct list_head *pagepool,
				 struct z_erofs_decompressqueue *fgq,
@@ -1172,19 +1157,12 @@ static bool z_erofs_submit_queue(struct super_block *sb,
	struct erofs_sb_info *const sbi = EROFS_SB(sb);
	z_erofs_next_pcluster_t qtail[NR_JOBQUEUES];
	struct z_erofs_decompressqueue *q[NR_JOBQUEUES];
	struct bio *bio;
	void *bi_private;
	/* since bio will be NULL, no need to initialize last_index */
	pgoff_t uninitialized_var(last_index);
	bool force_submit = false;
	unsigned int nr_bios;
	unsigned int nr_bios = 0;
	struct bio *bio = NULL;

	if (owned_head == Z_EROFS_PCLUSTER_TAIL)
		return false;

	force_submit = false;
	bio = NULL;
	nr_bios = 0;
	bi_private = jobqueueset_init(sb, q, fgq, force_fg);
	qtail[JQ_BYPASS] = &q[JQ_BYPASS]->head;
	qtail[JQ_SUBMIT] = &q[JQ_SUBMIT]->head;
@@ -1194,11 +1172,9 @@ static bool z_erofs_submit_queue(struct super_block *sb,

	do {
		struct z_erofs_pcluster *pcl;
		unsigned int clusterpages;
		pgoff_t first_index;
		struct page *page;
		unsigned int i = 0, bypass = 0;
		int err;
		pgoff_t cur, end;
		unsigned int i = 0;
		bool bypass = true;

		/* no possible 'owned_head' equals the following */
		DBG_BUGON(owned_head == Z_EROFS_PCLUSTER_TAIL_CLOSED);
@@ -1206,26 +1182,24 @@ static bool z_erofs_submit_queue(struct super_block *sb,

		pcl = container_of(owned_head, struct z_erofs_pcluster, next);

		clusterpages = BIT(pcl->clusterbits);
		cur = pcl->obj.index;
		end = cur + BIT(pcl->clusterbits);

		/* close the main owned chain at first */
		owned_head = cmpxchg(&pcl->next, Z_EROFS_PCLUSTER_TAIL,
				     Z_EROFS_PCLUSTER_TAIL_CLOSED);

		first_index = pcl->obj.index;
		force_submit |= (first_index != last_index + 1);
		do {
			struct page *page;
			int err;

repeat:
		page = pickup_page_for_submission(pcl, i, pagepool,
			page = pickup_page_for_submission(pcl, i++, pagepool,
							  MNGD_MAPPING(sbi),
							  GFP_NOFS);
		if (!page) {
			force_submit = true;
			++bypass;
			goto skippage;
		}
			if (!page)
				continue;

		if (bio && force_submit) {
			if (bio && cur != last_index + 1) {
submit_bio_retry:
				submit_bio(bio);
				bio = NULL;
@@ -1236,11 +1210,10 @@ submit_bio_retry:

				bio->bi_end_io = z_erofs_decompressqueue_endio;
				bio_set_dev(bio, sb->s_bdev);
			bio->bi_iter.bi_sector = (sector_t)(first_index + i) <<
				bio->bi_iter.bi_sector = (sector_t)cur <<
					LOG_SECTORS_PER_BLOCK;
				bio->bi_private = bi_private;
				bio->bi_opf = REQ_OP_READ;

				++nr_bios;
			}

@@ -1248,13 +1221,11 @@ submit_bio_retry:
			if (err < PAGE_SIZE)
				goto submit_bio_retry;

		force_submit = false;
		last_index = first_index + i;
skippage:
		if (++i < clusterpages)
			goto repeat;
			last_index = cur;
			bypass = false;
		} while (++cur < end);

		if (bypass < clusterpages)
		if (!bypass)
			qtail[JQ_SUBMIT] = &pcl->next;
		else
			move_to_bypass_jobqueue(pcl, qtail, owned_head);
@@ -1263,11 +1234,15 @@ skippage:
	if (bio)
		submit_bio(bio);

	if (postsubmit_is_all_bypassed(q, nr_bios, *force_fg))
		return true;

	/*
	 * although background is preferred, no one is pending for submission.
	 * don't issue workqueue for decompression but drop it directly instead.
	 */
	if (!*force_fg && !nr_bios) {
		kvfree(q[JQ_SUBMIT]);
		return;
	}
	z_erofs_decompress_kickoff(q[JQ_SUBMIT], *force_fg, nr_bios);
	return true;
}

static void z_erofs_runqueue(struct super_block *sb,
@@ -1276,9 +1251,9 @@ static void z_erofs_runqueue(struct super_block *sb,
{
	struct z_erofs_decompressqueue io[NR_JOBQUEUES];

	if (!z_erofs_submit_queue(sb, clt->owned_head,
				  pagepool, io, &force_fg))
	if (clt->owned_head == Z_EROFS_PCLUSTER_TAIL)
		return;
	z_erofs_submit_queue(sb, clt->owned_head, pagepool, io, &force_fg);

	/* handle bypass queue (no i/o pclusters) immediately */
	z_erofs_decompress_queue(&io[JQ_BYPASS], pagepool);