Commit b8072d5b authored by Linus Torvalds
Browse files
Pull ext2, quota, reiserfs cleanups and fixes from Jan Kara:

 - Refactor the quota on/off kernel internal interfaces (mostly for
   ubifs quota support as ubifs does not want to have inodes holding
   quota information)

 - A few other small quota fixes and cleanups

 - Various small ext2 fixes and cleanups

 - Reiserfs xattr fix and one cleanup

* tag 'for_v5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs: (28 commits)
  ext2: code cleanup for descriptor_loc()
  fs/quota: handle overflows of sysctl fs.quota.* and report as unsigned long
  ext2: fix improper function comment
  ext2: code cleanup for ext2_try_to_allocate()
  ext2: skip unnecessary operations in ext2_try_to_allocate()
  ext2: Simplify initialization in ext2_try_to_allocate()
  ext2: code cleanup by calling ext2_group_last_block_no()
  ext2: introduce new helper ext2_group_last_block_no()
  reiserfs: replace open-coded atomic_dec_and_mutex_lock()
  ext2: check err when partial != NULL
  quota: Handle quotas without quota inodes in dquot_get_state()
  quota: Make dquot_disable() work without quota inodes
  quota: Drop dquot_enable()
  fs: Use dquot_load_quota_inode() from filesystems
  quota: Rename vfs_load_quota_inode() to dquot_load_quota_inode()
  quota: Simplify dquot_resume()
  quota: Factor out setup of quota inode
  quota: Check that quota is not dirty before release
  quota: fix livelock in dquot_writeback_dquots
  ext2: don't set *count in the case of failure in ext2_try_to_allocate()
  ...
parents e2d73c30 545886fe
Loading
Loading
Loading
Loading
+30 −45
Original line number Diff line number Diff line
@@ -269,7 +269,7 @@ goal_in_my_reservation(struct ext2_reserve_window *rsv, ext2_grpblk_t grp_goal,
	ext2_fsblk_t group_first_block, group_last_block;

	group_first_block = ext2_group_first_block_no(sb, group);
	group_last_block = group_first_block + EXT2_BLOCKS_PER_GROUP(sb) - 1;
	group_last_block = ext2_group_last_block_no(sb, group);

	if ((rsv->_rsv_start > group_last_block) ||
	    (rsv->_rsv_end < group_first_block))
@@ -666,37 +666,24 @@ ext2_try_to_allocate(struct super_block *sb, int group,
			unsigned long *count,
			struct ext2_reserve_window *my_rsv)
{
	ext2_fsblk_t group_first_block;
	ext2_fsblk_t group_first_block = ext2_group_first_block_no(sb, group);
	ext2_fsblk_t group_last_block = ext2_group_last_block_no(sb, group);
       	ext2_grpblk_t start, end;
	unsigned long num = 0;

	start = 0;
	end = group_last_block - group_first_block + 1;
	/* we do allocation within the reservation window if we have a window */
	if (my_rsv) {
		group_first_block = ext2_group_first_block_no(sb, group);
		if (my_rsv->_rsv_start >= group_first_block)
			start = my_rsv->_rsv_start - group_first_block;
		else
			/* reservation window cross group boundary */
			start = 0;
		if (my_rsv->_rsv_end < group_last_block)
			end = my_rsv->_rsv_end - group_first_block + 1;
		if (end > EXT2_BLOCKS_PER_GROUP(sb))
			/* reservation window crosses group boundary */
			end = EXT2_BLOCKS_PER_GROUP(sb);
		if ((start <= grp_goal) && (grp_goal < end))
			start = grp_goal;
		else
		if (grp_goal < start || grp_goal >= end)
			grp_goal = -1;
	} else {
		if (grp_goal > 0)
			start = grp_goal;
		else
			start = 0;
		end = EXT2_BLOCKS_PER_GROUP(sb);
	}

	BUG_ON(start > EXT2_BLOCKS_PER_GROUP(sb));

repeat:
	if (grp_goal < 0) {
		grp_goal = find_next_usable_block(start, bitmap_bh, end);
		if (grp_goal < 0)
@@ -711,32 +698,23 @@ repeat:
				;
		}
	}
	start = grp_goal;

	if (ext2_set_bit_atomic(sb_bgl_lock(EXT2_SB(sb), group), grp_goal,
			       				bitmap_bh->b_data)) {
		/*
		 * The block was allocated by another thread, or it was
		 * allocated and then freed by another thread
		 */
		start++;
		grp_goal++;
		if (start >= end)
			goto fail_access;
		goto repeat;
	}
	num++;
	grp_goal++;
	while (num < *count && grp_goal < end
		&& !ext2_set_bit_atomic(sb_bgl_lock(EXT2_SB(sb), group),
	for (; num < *count && grp_goal < end; grp_goal++) {
		if (ext2_set_bit_atomic(sb_bgl_lock(EXT2_SB(sb), group),
					grp_goal, bitmap_bh->b_data)) {
			if (num == 0)
				continue;
			break;
		}
		num++;
		grp_goal++;
	}

	if (num == 0)
		goto fail_access;

	*count = num;
	return grp_goal - num;
fail_access:
	*count = num;
	return -1;
}

@@ -754,10 +732,9 @@ fail_access:
 *		but we will shift to the place where start_block is,
 *		then start from there, when looking for a reservable space.
 *
 * 	@size: the target new reservation window size
 *	@sb: the super block.
 *
 * 	@group_first_block: the first block we consider to start
 *			the real search from
 * 	@start_block: the first block we consider to start the real search from
 *
 * 	@last_block:
 *		the maximum block number that our goal reservable space
@@ -908,7 +885,7 @@ static int alloc_new_reservation(struct ext2_reserve_window_node *my_rsv,
	spinlock_t *rsv_lock = &EXT2_SB(sb)->s_rsv_window_lock;

	group_first_block = ext2_group_first_block_no(sb, group);
	group_end_block = group_first_block + (EXT2_BLOCKS_PER_GROUP(sb) - 1);
	group_end_block = ext2_group_last_block_no(sb, group);

	if (grp_goal < 0)
		start_block = group_first_block;
@@ -1115,7 +1092,7 @@ ext2_try_to_allocate_with_rsv(struct super_block *sb, unsigned int group,
	 * first block is the block number of the first block in this group
	 */
	group_first_block = ext2_group_first_block_no(sb, group);
	group_last_block = group_first_block + (EXT2_BLOCKS_PER_GROUP(sb) - 1);
	group_last_block = ext2_group_last_block_no(sb, group);

	/*
	 * Basically we will allocate a new block from inode's reservation
@@ -1313,6 +1290,13 @@ retry_alloc:
	if (free_blocks > 0) {
		grp_target_blk = ((goal - le32_to_cpu(es->s_first_data_block)) %
				EXT2_BLOCKS_PER_GROUP(sb));
		/*
		 * In case we retry allocation (due to fs reservation not
		 * working out or fs corruption), the bitmap_bh is non-null
		 * pointer and we have to release it before calling
		 * read_block_bitmap().
		 */
		brelse(bitmap_bh);
		bitmap_bh = read_block_bitmap(sb, group_no);
		if (!bitmap_bh)
			goto io_error;
@@ -1404,6 +1388,7 @@ allocated:
		 * use.  So we may want to selectively mark some of the blocks
		 * as free
		 */
		num = *count;
		goto retry_alloc;
	}

+12 −0
Original line number Diff line number Diff line
@@ -813,6 +813,18 @@ ext2_group_first_block_no(struct super_block *sb, unsigned long group_no)
		le32_to_cpu(EXT2_SB(sb)->s_es->s_first_data_block);
}

static inline ext2_fsblk_t
ext2_group_last_block_no(struct super_block *sb, unsigned long group_no)
{
	struct ext2_sb_info *sbi = EXT2_SB(sb);

	/*
	 * The final block group may be truncated: the filesystem can end
	 * before a full EXT2_BLOCKS_PER_GROUP span, so its last block is
	 * the last block of the filesystem (s_blocks_count - 1) rather
	 * than a full group boundary.
	 */
	if (group_no != sbi->s_groups_count - 1)
		return ext2_group_first_block_no(sb, group_no) +
			EXT2_BLOCKS_PER_GROUP(sb) - 1;

	return le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
}

#define ext2_set_bit	__test_and_set_bit_le
#define ext2_clear_bit	__test_and_clear_bit_le
#define ext2_test_bit	test_bit_le
+5 −2
Original line number Diff line number Diff line
@@ -701,10 +701,13 @@ static int ext2_get_blocks(struct inode *inode,
		if (!partial) {
			count++;
			mutex_unlock(&ei->truncate_mutex);
			if (err)
				goto cleanup;
			goto got_it;
		}

		if (err) {
			mutex_unlock(&ei->truncate_mutex);
			goto cleanup;
		}
	}

	/*
+4 −1
Original line number Diff line number Diff line
@@ -145,10 +145,13 @@ setversion_out:
		if (ei->i_block_alloc_info){
			struct ext2_reserve_window_node *rsv = &ei->i_block_alloc_info->rsv_window_node;
			rsv->rsv_goal_size = rsv_window_size;
		} else {
			ret = -ENOMEM;
		}

		mutex_unlock(&ei->truncate_mutex);
		mnt_drop_write_file(filp);
		return 0;
		return ret;
	}
	default:
		return -ENOTTY;
+2 −11
Original line number Diff line number Diff line
@@ -702,13 +702,7 @@ static int ext2_check_descriptors(struct super_block *sb)
	for (i = 0; i < sbi->s_groups_count; i++) {
		struct ext2_group_desc *gdp = ext2_get_group_desc(sb, i, NULL);
		ext2_fsblk_t first_block = ext2_group_first_block_no(sb, i);
		ext2_fsblk_t last_block;

		if (i == sbi->s_groups_count - 1)
			last_block = le32_to_cpu(sbi->s_es->s_blocks_count) - 1;
		else
			last_block = first_block +
				(EXT2_BLOCKS_PER_GROUP(sb) - 1);
		ext2_fsblk_t last_block = ext2_group_last_block_no(sb, i);

		if (le32_to_cpu(gdp->bg_block_bitmap) < first_block ||
		    le32_to_cpu(gdp->bg_block_bitmap) > last_block)
@@ -806,7 +800,6 @@ static unsigned long descriptor_loc(struct super_block *sb,
{
	struct ext2_sb_info *sbi = EXT2_SB(sb);
	unsigned long bg, first_meta_bg;
	int has_super = 0;
	
	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);

@@ -814,10 +807,8 @@ static unsigned long descriptor_loc(struct super_block *sb,
	    nr < first_meta_bg)
		return (logic_sb_block + nr + 1);
	bg = sbi->s_desc_per_block * nr;
	if (ext2_bg_has_super(sb, bg))
		has_super = 1;

	return ext2_group_first_block_no(sb, bg) + has_super;
	return ext2_group_first_block_no(sb, bg) + ext2_bg_has_super(sb, bg);
}

static int ext2_fill_super(struct super_block *sb, void *data, int silent)
Loading