Commit 48b4b4ff authored by Linus Torvalds

Merge tag 'for-5.6/block-2020-01-27' of git://git.kernel.dk/linux-block

Pull core block updates from Jens Axboe:
 "This may be the most quiet round we've had in years. I'm not
  complaining. Really not a lot to detail here, outside of spelling and
  documentation improvements/fixes, we have:

   - Allow t10-pi to be modular (Herbert)

   - Remove dead code in bfq (Alex)

   - Mark zone management requests with REQ_SYNC (Chaitanya)

   - BFQ division improvement (Wen)

   - Small series improving plugging (Pavel)"

* tag 'for-5.6/block-2020-01-27' of git://git.kernel.dk/linux-block:
  partitions/ldm: fix spelling mistake "to" -> "too"
  block, bfq: improve arithmetic division in bfq_delta()
  block/bfq: remove unused bfq_class_rt which was never used
  block: mark zone-mgmt bios with REQ_SYNC
  blk-mq: Document functions for sending request
  block: Allow t10-pi to be modular
  blk-mq: optimise blk_mq_flush_plug_list()
  list: introduce list_for_each_continue()
  blk-mq: optimise rq sort function
parents 34dabd81 5336da37
+5 −1
@@ -66,7 +66,6 @@ config BLK_DEV_BSGLIB
 
 config BLK_DEV_INTEGRITY
 	bool "Block layer data integrity support"
-	select CRC_T10DIF if BLK_DEV_INTEGRITY
 	---help---
 	Some storage devices allow extra information to be
 	stored/retrieved to help protect the data.  The block layer
@@ -77,6 +76,11 @@ config BLK_DEV_INTEGRITY
 	T10/SCSI Data Integrity Field or the T13/ATA External Path
 	Protection.  If in doubt, say N.
 
+config BLK_DEV_INTEGRITY_T10
+	tristate
+	depends on BLK_DEV_INTEGRITY
+	select CRC_T10DIF
+
 config BLK_DEV_ZONED
 	bool "Zoned block device support"
 	select MQ_IOSCHED_DEADLINE
+2 −1
@@ -27,7 +27,8 @@ obj-$(CONFIG_IOSCHED_BFQ) += bfq.o
 
 obj-$(CONFIG_BLOCK_COMPAT)	+= compat_ioctl.o
 obj-$(CONFIG_BLK_CMDLINE_PARSER)	+= cmdline-parser.o
-obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
+obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o
+obj-$(CONFIG_BLK_DEV_INTEGRITY_T10)	+= t10-pi.o
 obj-$(CONFIG_BLK_MQ_PCI)	+= blk-mq-pci.o
 obj-$(CONFIG_BLK_MQ_VIRTIO)	+= blk-mq-virtio.o
 obj-$(CONFIG_BLK_MQ_RDMA)	+= blk-mq-rdma.o
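
With BLK_DEV_INTEGRITY_T10 a tristate and t10-pi.o moved to its own Makefile
line, the T10 Protection Information code can now be built as a module
(t10-pi.ko) instead of always being linked into the block layer. A minimal
sketch of the boilerplate a formerly built-in file needs when it becomes
loadable; the exact strings used by the patch may differ:

/* sketch of what t10-pi.c needs as a loadable object: module metadata;
 * the EXPORT_SYMBOL'ed t10_pi_* integrity profiles it already provides
 * stay visible to the code that selects BLK_DEV_INTEGRITY_T10 */
#include <linux/module.h>

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("T10 Protection Information module");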
+0 −1
@@ -427,7 +427,6 @@ void bfq_schedule_dispatch(struct bfq_data *bfqd)
 }
 
 #define bfq_class_idle(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE)
-#define bfq_class_rt(bfqq)	((bfqq)->ioprio_class == IOPRIO_CLASS_RT)
 
 #define bfq_sample_valid(samples)	((samples) > 80)
 
+1 −4
@@ -277,10 +277,7 @@ struct bfq_queue *bfq_entity_to_bfqq(struct bfq_entity *entity)
  */
 static u64 bfq_delta(unsigned long service, unsigned long weight)
 {
-	u64 d = (u64)service << WFQ_SERVICE_SHIFT;
-
-	do_div(d, weight);
-	return d;
+	return div64_ul((u64)service << WFQ_SERVICE_SHIFT, weight);
 }
 
 /**
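
For context on the bfq_delta() change: do_div() is a 32-bit-divisor primitive
(an unsigned long divisor gets truncated), while weight here is an unsigned
long, so div64_ul() is the matching helper and also reads as one expression.
A minimal userspace sketch of the two shapes, assuming WFQ_SERVICE_SHIFT is
bfq's fixed-point shift; plain 64-bit division stands in for the kernel
helpers:

/*
 * Sketch only. Assumes WFQ_SERVICE_SHIFT == 22 as in bfq; do_div() and
 * div64_ul() are kernel helpers, emulated with ordinary C division here.
 */
#include <stdint.h>
#include <stdio.h>

#define WFQ_SERVICE_SHIFT 22

/* old shape: scale, then divide in place via do_div(d, weight);
 * do_div() takes a 32-bit divisor, so a wide weight would be truncated */
static uint64_t bfq_delta_old(unsigned long service, unsigned long weight)
{
	uint64_t d = (uint64_t)service << WFQ_SERVICE_SHIFT;

	d /= (uint32_t)weight;
	return d;
}

/* new shape: one expression via div64_ul(dividend, divisor), which
 * accepts the full unsigned long divisor */
static uint64_t bfq_delta_new(unsigned long service, unsigned long weight)
{
	return ((uint64_t)service << WFQ_SERVICE_SHIFT) / weight;
}

int main(void)
{
	printf("old=%llu new=%llu\n",
	       (unsigned long long)bfq_delta_old(100, 3),
	       (unsigned long long)bfq_delta_new(100, 3));
	return 0;
}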
+106 −48
@@ -641,6 +641,14 @@ bool blk_mq_complete_request(struct request *rq)
 }
 EXPORT_SYMBOL(blk_mq_complete_request);
 
+/**
+ * blk_mq_start_request - Start processing a request
+ * @rq: Pointer to request to be started
+ *
+ * Function used by device drivers to notify the block layer that a request
+ * is going to be processed now, so the block layer can do proper
+ * initializations such as starting the timeout timer.
+ */
 void blk_mq_start_request(struct request *rq)
 {
 	struct request_queue *q = rq->q;
@@ -1327,6 +1335,12 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
 	return (queued + errors) != 0;
 }
 
+/**
+ * __blk_mq_run_hw_queue - Run a hardware queue.
+ * @hctx: Pointer to the hardware queue to run.
+ *
+ * Send pending requests to the hardware.
+ */
 static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
 {
 	int srcu_idx;
@@ -1424,6 +1438,15 @@ select_cpu:
 	return next_cpu;
 }
 
+/**
+ * __blk_mq_delay_run_hw_queue - Run (or schedule to run) a hardware queue.
+ * @hctx: Pointer to the hardware queue to run.
+ * @async: If we want to run the queue asynchronously.
+ * @msecs: Milliseconds of delay to wait before running the queue.
+ *
+ * If !@async, try to run the queue now. Else, run the queue asynchronously and
+ * with a delay of @msecs.
+ */
 static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
 					unsigned long msecs)
 {
@@ -1445,12 +1468,28 @@ static void __blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async,
 				    msecs_to_jiffies(msecs));
 }
 
+/**
+ * blk_mq_delay_run_hw_queue - Run a hardware queue asynchronously.
+ * @hctx: Pointer to the hardware queue to run.
+ * @msecs: Milliseconds of delay to wait before running the queue.
+ *
+ * Run a hardware queue asynchronously with a delay of @msecs.
+ */
 void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs)
 {
 	__blk_mq_delay_run_hw_queue(hctx, true, msecs);
 }
 EXPORT_SYMBOL(blk_mq_delay_run_hw_queue);
 
+/**
+ * blk_mq_run_hw_queue - Start to run a hardware queue.
+ * @hctx: Pointer to the hardware queue to run.
+ * @async: If we want to run the queue asynchronously.
+ *
+ * Check if the request queue is not in a quiesced state and if there are
+ * pending requests to be sent. If this is true, run the queue to send requests
+ * to hardware.
+ */
 void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 {
 	int srcu_idx;
@@ -1474,6 +1513,11 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
 }
 EXPORT_SYMBOL(blk_mq_run_hw_queue);
 
+/**
+ * blk_mq_run_hw_queues - Run all hardware queues in a request queue.
+ * @q: Pointer to the request queue to run.
+ * @async: If we want to run the queues asynchronously.
+ */
 void blk_mq_run_hw_queues(struct request_queue *q, bool async)
 {
 	struct blk_mq_hw_ctx *hctx;
@@ -1625,7 +1669,11 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
 	blk_mq_hctx_mark_pending(hctx, ctx);
 }
 
-/*
+/**
+ * blk_mq_request_bypass_insert - Insert a request at dispatch list.
+ * @rq: Pointer to request to be inserted.
+ * @run_queue: If we should run the hardware queue after inserting the request.
+ *
  * Should only be used carefully, when the caller knows we want to
  * bypass a potential IO scheduler on the target device.
  */
@@ -1668,28 +1716,20 @@ static int plug_rq_cmp(void *priv, struct list_head *a, struct list_head *b)
 	struct request *rqa = container_of(a, struct request, queuelist);
 	struct request *rqb = container_of(b, struct request, queuelist);
 
-	if (rqa->mq_ctx < rqb->mq_ctx)
-		return -1;
-	else if (rqa->mq_ctx > rqb->mq_ctx)
-		return 1;
-	else if (rqa->mq_hctx < rqb->mq_hctx)
-		return -1;
-	else if (rqa->mq_hctx > rqb->mq_hctx)
-		return 1;
+	if (rqa->mq_ctx != rqb->mq_ctx)
+		return rqa->mq_ctx > rqb->mq_ctx;
+	if (rqa->mq_hctx != rqb->mq_hctx)
+		return rqa->mq_hctx > rqb->mq_hctx;
 
 	return blk_rq_pos(rqa) > blk_rq_pos(rqb);
 }
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 {
-	struct blk_mq_hw_ctx *this_hctx;
-	struct blk_mq_ctx *this_ctx;
-	struct request_queue *this_q;
-	struct request *rq;
 	LIST_HEAD(list);
-	LIST_HEAD(rq_list);
-	unsigned int depth;
 
+	if (list_empty(&plug->mq_list))
+		return;
 	list_splice_init(&plug->mq_list, &list);
 
 	if (plug->rq_count > 2 && plug->multiple_queues)
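
The comparator rewrite leans on the contract of the kernel's list_sort():
the cmp callback only has to return a value greater than zero when a should
sort after b, and never needs to distinguish -1 from 0, so the three-way
-1/0/1 dance is unnecessary and a boolean comparison is enough. A small
userspace sketch of that equivalence, with plain int fields standing in for
the mq_ctx/mq_hctx pointers and made-up values:

#include <assert.h>
#include <stdio.h>

struct rq { int ctx, hctx; long pos; };

/* old style: full -1/0/1 three-way comparison */
static int cmp_threeway(const struct rq *a, const struct rq *b)
{
	if (a->ctx < b->ctx)
		return -1;
	else if (a->ctx > b->ctx)
		return 1;
	else if (a->hctx < b->hctx)
		return -1;
	else if (a->hctx > b->hctx)
		return 1;

	return a->pos > b->pos;
}

/* new style: boolean "a sorts after b", all list_sort() needs */
static int cmp_bool(const struct rq *a, const struct rq *b)
{
	if (a->ctx != b->ctx)
		return a->ctx > b->ctx;
	if (a->hctx != b->hctx)
		return a->hctx > b->hctx;

	return a->pos > b->pos;
}

int main(void)
{
	struct rq a = { 1, 2, 10 }, b = { 1, 3, 5 };

	/* both forms agree on the only question list_sort() asks */
	assert((cmp_threeway(&a, &b) > 0) == (cmp_bool(&a, &b) > 0));
	printf("comparators agree\n");
	return 0;
}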
@@ -1697,42 +1737,27 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 
 	plug->rq_count = 0;
 
-	this_q = NULL;
-	this_hctx = NULL;
-	this_ctx = NULL;
-	depth = 0;
-
-	while (!list_empty(&list)) {
-		rq = list_entry_rq(list.next);
-		list_del_init(&rq->queuelist);
-		BUG_ON(!rq->q);
-		if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx) {
-			if (this_hctx) {
-				trace_block_unplug(this_q, depth, !from_schedule);
-				blk_mq_sched_insert_requests(this_hctx, this_ctx,
-								&rq_list,
-								from_schedule);
-			}
-
-			this_q = rq->q;
-			this_ctx = rq->mq_ctx;
-			this_hctx = rq->mq_hctx;
-			depth = 0;
-		}
-
-		depth++;
-		list_add_tail(&rq->queuelist, &rq_list);
-	}
-
-	/*
-	 * If 'this_hctx' is set, we know we have entries to complete
-	 * on 'rq_list'. Do those.
-	 */
-	if (this_hctx) {
-		trace_block_unplug(this_q, depth, !from_schedule);
-		blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
+	do {
+		struct list_head rq_list;
+		struct request *rq, *head_rq = list_entry_rq(list.next);
+		struct list_head *pos = &head_rq->queuelist; /* skip first */
+		struct blk_mq_hw_ctx *this_hctx = head_rq->mq_hctx;
+		struct blk_mq_ctx *this_ctx = head_rq->mq_ctx;
+		unsigned int depth = 1;
+
+		list_for_each_continue(pos, &list) {
+			rq = list_entry_rq(pos);
+			BUG_ON(!rq->q);
+			if (rq->mq_hctx != this_hctx || rq->mq_ctx != this_ctx)
+				break;
+			depth++;
+		}
+
+		list_cut_before(&rq_list, &list, pos);
+		trace_block_unplug(head_rq->q, depth, !from_schedule);
+		blk_mq_sched_insert_requests(this_hctx, this_ctx, &rq_list,
 						from_schedule);
-	}
+	} while(!list_empty(&list));
 }
 
 static void blk_mq_bio_to_request(struct request *rq, struct bio *bio,
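
The rewritten blk_mq_flush_plug_list() no longer detaches requests one at a
time into a side list. Because the list was just sorted, requests sharing the
same (mq_ctx, mq_hctx) pair sit in one contiguous run, so each pass of the
do/while counts the run with the new list_for_each_continue() helper, splits
the whole run off with a single list_cut_before(), and hands the batch to
blk_mq_sched_insert_requests() in one call. A userspace sketch of the same
batching idea over an array, with dispatch() as a hypothetical stand-in for
the insert call:

#include <stdio.h>

struct req { int ctx, hctx; long pos; };

static void dispatch(const struct req *batch, unsigned int depth)
{
	printf("insert %u request(s) for ctx=%d hctx=%d\n",
	       depth, batch[0].ctx, batch[0].hctx);
}

int main(void)
{
	/* already sorted by (ctx, hctx, pos), as plug_rq_cmp() ensures */
	struct req list[] = {
		{ 0, 0, 1 }, { 0, 0, 5 }, { 0, 1, 2 }, { 1, 1, 3 },
	};
	unsigned int i = 0, n = sizeof(list) / sizeof(list[0]);

	while (i < n) {			/* kernel: do { } while (!list_empty()) */
		unsigned int depth = 1;	/* the head request opens the batch */

		/* kernel: list_for_each_continue() walks past the head */
		while (i + depth < n &&
		       list[i + depth].ctx == list[i].ctx &&
		       list[i + depth].hctx == list[i].hctx)
			depth++;

		/* kernel: list_cut_before() + one insert for the whole run */
		dispatch(&list[i], depth);
		i += depth;
	}
	return 0;
}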
@@ -1828,6 +1853,17 @@ insert:
 	return BLK_STS_OK;
 }
 
+/**
+ * blk_mq_try_issue_directly - Try to send a request directly to device driver.
+ * @hctx: Pointer of the associated hardware queue.
+ * @rq: Pointer to request to be sent.
+ * @cookie: Request queue cookie.
+ *
+ * If the device has enough resources to accept a new request now, send the
+ * request directly to device driver. Else, insert at hctx->dispatch queue, so
+ * we can try to send it again in the future. Requests inserted at this queue
+ * have higher priority.
+ */
 static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
 		struct request *rq, blk_qc_t *cookie)
 {
@@ -1905,6 +1941,22 @@ static void blk_add_rq_to_plug(struct blk_plug *plug, struct request *rq)
 	}
 }
 
+/**
+ * blk_mq_make_request - Create and send a request to block device.
+ * @q: Request queue pointer.
+ * @bio: Bio pointer.
+ *
+ * Builds up a request structure from @q and @bio and sends it to the device.
+ * The request may not be queued directly to hardware if:
+ * * This request can be merged with another one
+ * * We want to place the request at the plug queue for possible future merging
+ * * There is an IO scheduler active at this queue
+ *
+ * It will not queue the request if there is an error with the bio, or at the
+ * request creation.
+ *
+ * Returns: Request queue cookie.
+ */
 static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 {
 	const int is_sync = op_is_sync(bio->bi_opf);
@@ -1950,7 +2002,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 	plug = blk_mq_plug(q, bio);
 	if (unlikely(is_flush_fua)) {
-		/* bypass scheduler for flush rq */
+		/* Bypass scheduler for flush requests */
 		blk_insert_flush(rq);
 		blk_mq_run_hw_queue(data.hctx, true);
 	} else if (plug && (q->nr_hw_queues == 1 || q->mq_ops->commit_rqs ||
@@ -1978,6 +2030,7 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 
 		blk_add_rq_to_plug(plug, rq);
 	} else if (q->elevator) {
+		/* Insert the request at the IO scheduler queue */
 		blk_mq_sched_insert_request(rq, false, true, true);
 	} else if (plug && !blk_queue_nomerges(q)) {
 		/*
@@ -2004,8 +2057,13 @@ static blk_qc_t blk_mq_make_request(struct request_queue *q, struct bio *bio)
 		}
 	} else if ((q->nr_hw_queues > 1 && is_sync) ||
 			!data.hctx->dispatch_busy) {
+		/*
+		 * There is no scheduler and we can try to send directly
+		 * to the hardware.
+		 */
 		blk_mq_try_issue_directly(data.hctx, rq, &cookie);
 	} else {
+		/* Default case. */
 		blk_mq_sched_insert_request(rq, false, true, true);
 	}
