Commit e3a251e3 authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull UBI/UBIFS/JFFS2 updates from Richard Weinberger:
 "This pull request contains mostly fixes for UBI, UBIFS and JFFS2:

  UBI:

   - Fix a regression around producing an anchor PEB for fastmap.

     Due to a change in our locking, fastmap was unable to produce
     fresh anchors and re-used the existing one way too often.

  UBIFS:

   - Fixes for endianness. A few places blindly assumed little endian.

   - Fix for a memory leak in the orphan code.

   - Fix for a possible crash during a commit.

   - Revert a wrong bugfix.

  JFFS2:

   - Revert a bad bugfix (false positive from a code checking tool)"

* tag 'upstream-5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs:
  Revert "jffs2: Fix possible null-pointer dereferences in jffs2_add_frag_to_fragtree()"
  ubi: Fix producing anchor PEBs
  ubifs: ubifs_tnc_start_commit: Fix OOB in layout_in_gaps
  ubifs: do_kill_orphans: Fix a memory leak bug
  Revert "ubifs: Fix memory leak bug in alloc_ubifs_info() error path"
  ubifs: Fix type of sup->hash_algo
  ubifs: Fixed missed le64_to_cpu() in journal
  ubifs: Force prandom result to __le32
  ubifs: Remove obsolete TODO from dfs_file_write()
  ubi: Fix warning static is not at beginning of declaration
  ubi: Print skip_check in ubi_dump_vol_info()
parents 97eeb4d9 6e78c01f
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -107,6 +107,7 @@ void ubi_dump_vol_info(const struct ubi_volume *vol)
	pr_err("\tlast_eb_bytes   %d\n", vol->last_eb_bytes);
	pr_err("\tcorrupted       %d\n", vol->corrupted);
	pr_err("\tupd_marker      %d\n", vol->upd_marker);
	pr_err("\tskip_check      %d\n", vol->skip_check);

	if (vol->name_len <= UBI_VOL_NAME_MAX &&
	    strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
+18 −13
Original line number Diff line number Diff line
@@ -57,18 +57,6 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
	}
}

/**
 * anchor_pebs_available - check whether a fastmap anchor candidate exists.
 * @root: RB-tree of &struct ubi_wl_entry objects to scan (the callers in
 *        this diff pass &ubi->free, i.e. the free-PEB tree)
 *
 * Walk every wear-leveling entry in @root and report whether at least one
 * physical eraseblock has a number below UBI_FM_MAX_START — i.e. lies early
 * enough on the flash to serve as a fastmap anchor PEB.
 *
 * Returns: 1 if a suitable anchor PEB is present, 0 otherwise.
 */
static int anchor_pebs_available(struct rb_root *root)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;

	/* First match wins: any PEB below UBI_FM_MAX_START qualifies. */
	ubi_rb_for_each_entry(p, e, root, u.rb)
		if (e->pnum < UBI_FM_MAX_START)
			return 1;

	return 0;
}

/**
 * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
 * @ubi: UBI device description object
@@ -277,8 +265,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
	struct ubi_work *wrk;
	struct ubi_wl_entry *anchor;

	spin_lock(&ubi->wl_lock);

	/* Do we already have an anchor? */
	if (ubi->fm_anchor) {
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	/* See if we can find an anchor PEB on the list of free PEBs */
	anchor = ubi_wl_get_fm_peb(ubi, 1);
	if (anchor) {
		ubi->fm_anchor = anchor;
		spin_unlock(&ubi->wl_lock);
		return 0;
	}

	/* No luck, trigger wear leveling to produce a new anchor PEB */
	ubi->fm_do_produce_anchor = 1;
	if (ubi->wl_scheduled) {
		spin_unlock(&ubi->wl_lock);
		return 0;
@@ -294,7 +300,6 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
		return -ENOMEM;
	}

	wrk->anchor = 1;
	wrk->func = &wear_leveling_worker;
	__schedule_ubi_work(ubi, wrk);
	return 0;
+5 −9
Original line number Diff line number Diff line
@@ -1540,14 +1540,6 @@ int ubi_update_fastmap(struct ubi_device *ubi)
		return 0;
	}

	ret = ubi_ensure_anchor_pebs(ubi);
	if (ret) {
		up_write(&ubi->fm_eba_sem);
		up_write(&ubi->work_sem);
		up_write(&ubi->fm_protect);
		return ret;
	}

	new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
	if (!new_fm) {
		up_write(&ubi->fm_eba_sem);
@@ -1618,7 +1610,8 @@ int ubi_update_fastmap(struct ubi_device *ubi)
	}

	spin_lock(&ubi->wl_lock);
	tmp_e = ubi_wl_get_fm_peb(ubi, 1);
	tmp_e = ubi->fm_anchor;
	ubi->fm_anchor = NULL;
	spin_unlock(&ubi->wl_lock);

	if (old_fm) {
@@ -1670,6 +1663,9 @@ out_unlock:
	up_write(&ubi->work_sem);
	up_write(&ubi->fm_protect);
	kfree(old_fm);

	ubi_ensure_anchor_pebs(ubi);

	return ret;

err:
+5 −3
Original line number Diff line number Diff line
@@ -491,6 +491,8 @@ struct ubi_debug_info {
 * @fm_work: fastmap work queue
 * @fm_work_scheduled: non-zero if fastmap work was scheduled
 * @fast_attach: non-zero if UBI was attached by fastmap
 * @fm_anchor: The next anchor PEB to use for fastmap
 * @fm_do_produce_anchor: If true produce an anchor PEB in wl
 *
 * @used: RB-tree of used physical eraseblocks
 * @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -599,6 +601,8 @@ struct ubi_device {
	struct work_struct fm_work;
	int fm_work_scheduled;
	int fast_attach;
	struct ubi_wl_entry *fm_anchor;
	int fm_do_produce_anchor;

	/* Wear-leveling sub-system's stuff */
	struct rb_root used;
@@ -789,7 +793,6 @@ struct ubi_attach_info {
 * @vol_id: the volume ID on which this erasure is being performed
 * @lnum: the logical eraseblock number
 * @torture: if the physical eraseblock has to be tortured
 * @anchor: produce an anchor PEB to be used by fastmap
 *
 * The @func pointer points to the worker function. If the @shutdown argument is
 * not zero, the worker has to free the resources and exit immediately as the
@@ -805,7 +808,6 @@ struct ubi_work {
	int vol_id;
	int lnum;
	int torture;
	int anchor;
};

#include "debug.h"
@@ -968,7 +970,7 @@ int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count);
void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol);
#else
static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
int static inline ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
static inline int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
static inline void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol) {}
#endif

+14 −18
Original line number Diff line number Diff line
@@ -339,13 +339,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
		}
	}

	/* If no fastmap has been written and this WL entry can be used
	 * as anchor PEB, hold it back and return the second best WL entry
	 * such that fastmap can use the anchor PEB later. */
	if (prev_e && !ubi->fm_disabled &&
	    !ubi->fm && e->pnum < UBI_FM_MAX_START)
		return prev_e;

	return e;
}

@@ -656,9 +649,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
{
	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
	int erase = 0, keep = 0, vol_id = -1, lnum = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
	int anchor = wrk->anchor;
#endif
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
@@ -698,11 +688,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
	}

#ifdef CONFIG_MTD_UBI_FASTMAP
	/* Check whether we need to produce an anchor PEB */
	if (!anchor)
		anchor = !anchor_pebs_available(&ubi->free);

	if (anchor) {
	if (ubi->fm_do_produce_anchor) {
		e1 = find_anchor_wl_entry(&ubi->used);
		if (!e1)
			goto out_cancel;
@@ -719,6 +705,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
		ubi->fm_do_produce_anchor = 0;
	} else if (!ubi->scrub.rb_node) {
#else
	if (!ubi->scrub.rb_node) {
@@ -1051,7 +1038,6 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
		goto out_cancel;
	}

	wrk->anchor = 0;
	wrk->func = &wear_leveling_worker;
	if (nested)
		__schedule_ubi_work(ubi, wrk);
@@ -1093,8 +1079,15 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		spin_lock(&ubi->wl_lock);

		if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
			ubi->fm_anchor = e;
			ubi->fm_do_produce_anchor = 0;
		} else {
			wl_tree_add(e, &ubi->free);
			ubi->free_count++;
		}

		spin_unlock(&ubi->wl_lock);

		/*
@@ -1882,6 +1875,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
	if (err)
		goto out_free;

#ifdef CONFIG_MTD_UBI_FASTMAP
	ubi_ensure_anchor_pebs(ubi);
#endif
	return 0;

out_free:
Loading