Commit e29c6a13 authored by Linus Torvalds's avatar Linus Torvalds
Browse files

Merge tag 'block-5.6-2020-02-16' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "Not a lot here, which is great, basically just three small bcache
  fixes from Coly, and four NVMe fixes via Keith"

* tag 'block-5.6-2020-02-16' of git://git.kernel.dk/linux-block:
  nvme: fix the parameter order for nvme_get_log in nvme_get_fw_slot_info
  nvme/pci: move cqe check after device shutdown
  nvme: prevent warning triggered by nvme_stop_keep_alive
  nvme/tcp: fix bug on double requeue when send fails
  bcache: remove macro nr_to_fifo_front()
  bcache: Revert "bcache: shrink btree node cache after bch_btree_check()"
  bcache: ignore pending signals when creating gc and allocator thread
parents 713db356 f25372ff
Loading
Loading
Loading
Loading
+16 −2
Original line number Diff line number Diff line
@@ -67,6 +67,7 @@
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/random.h>
#include <linux/sched/signal.h>
#include <trace/events/bcache.h>

#define MAX_OPEN_BUCKETS 128
@@ -733,8 +734,21 @@ int bch_open_buckets_alloc(struct cache_set *c)

int bch_cache_allocator_start(struct cache *ca)
{
	struct task_struct *k = kthread_run(bch_allocator_thread,
					    ca, "bcache_allocator");
	struct task_struct *k;

	/*
	 * In case previous btree check operation occupies too much
	 * system memory for bcache btree node cache, and the
	 * registering process is selected by OOM killer. Here just
	 * ignore the SIGKILL sent by OOM killer if there is, to
	 * avoid kthread_run() being failed by pending signals. The
	 * bcache registering process will exit after the registration
	 * done.
	 */
	if (signal_pending(current))
		flush_signals(current);

	k = kthread_run(bch_allocator_thread, ca, "bcache_allocator");
	if (IS_ERR(k))
		return PTR_ERR(k);

+13 −0
Original line number Diff line number Diff line
@@ -34,6 +34,7 @@
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>
#include <linux/rculist.h>
#include <linux/delay.h>
#include <trace/events/bcache.h>
@@ -1913,6 +1914,18 @@ static int bch_gc_thread(void *arg)

int bch_gc_thread_start(struct cache_set *c)
{
	/*
	 * In case the previous btree check operation occupied too much
	 * system memory for the bcache btree node cache, the registering
	 * process may be selected by the OOM killer. Ignore any pending
	 * SIGKILL sent by the OOM killer here, so that kthread_run()
	 * does not fail because of pending signals. The bcache
	 * registering process will exit once the registration is done.
	 */
	if (signal_pending(current))
		flush_signals(current);

	c->gc_thread = kthread_run(bch_gc_thread, c, "bcache_gc");
	return PTR_ERR_OR_ZERO(c->gc_thread);
}
+2 −5
Original line number Diff line number Diff line
@@ -417,8 +417,6 @@ err:

/* Journalling */

#define nr_to_fifo_front(p, front_p, mask)	(((p) - (front_p)) & (mask))

static void btree_flush_write(struct cache_set *c)
{
	struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
@@ -510,8 +508,7 @@ static void btree_flush_write(struct cache_set *c)
		 *   journal entry can be reclaimed). These selected nodes
		 *   will be ignored and skipped in the following for-loop.
		 */
		if (nr_to_fifo_front(btree_current_write(b)->journal,
				     fifo_front_p,
		if (((btree_current_write(b)->journal - fifo_front_p) &
		     mask) != 0) {
			mutex_unlock(&b->write_lock);
			continue;
+0 −17
Original line number Diff line number Diff line
@@ -1917,23 +1917,6 @@ static int run_cache_set(struct cache_set *c)
		if (bch_btree_check(c))
			goto err;

		/*
		 * bch_btree_check() may occupy too much system memory which
		 * has negative effects to user space application (e.g. data
		 * base) performance. Shrink the mca cache memory proactively
		 * here to avoid competing memory with user space workloads..
		 */
		if (!c->shrinker_disabled) {
			struct shrink_control sc;

			sc.gfp_mask = GFP_KERNEL;
			sc.nr_to_scan = c->btree_cache_used * c->btree_pages;
			/* first run to clear b->accessed tag */
			c->shrink.scan_objects(&c->shrink, &sc);
			/* second run to reap non-accessed nodes */
			c->shrink.scan_objects(&c->shrink, &sc);
		}

		bch_journal_mark(c, &journal);
		bch_initial_gc_finish(c);
		pr_debug("btree_check() done");
+6 −6
Original line number Diff line number Diff line
@@ -66,8 +66,8 @@ MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
 * nvme_reset_wq - hosts nvme reset works
 * nvme_delete_wq - hosts nvme delete works
 *
 * nvme_wq will host works such are scan, aen handling, fw activation,
 * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
 * nvme_wq will host works such as scan, aen handling, fw activation,
 * keep-alive, periodic reconnects etc. nvme_reset_wq
 * runs reset works which also flush works hosted on nvme_wq for
 * serialization purposes. nvme_delete_wq host controller deletion
 * works which flush reset works for serialization.
@@ -976,7 +976,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
		startka = true;
	spin_unlock_irqrestore(&ctrl->lock, flags);
	if (startka)
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

static int nvme_keep_alive(struct nvme_ctrl *ctrl)
@@ -1006,7 +1006,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
		dev_dbg(ctrl->device,
			"reschedule traffic based keep-alive timer\n");
		ctrl->comp_seen = false;
		schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
		queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
		return;
	}

@@ -1023,7 +1023,7 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
	if (unlikely(ctrl->kato == 0))
		return;

	schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
	queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
}

void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
@@ -3867,7 +3867,7 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
	if (!log)
		return;

	if (nvme_get_log(ctrl, NVME_NSID_ALL, 0, NVME_LOG_FW_SLOT, log,
	if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
			sizeof(*log), 0))
		dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
	kfree(log);
Loading