Commit 0bbddb8c authored by Linus Torvalds's avatar Linus Torvalds
Browse files
Pull libata updates from Tejun Heo:

 - libata has always been limiting the maximum queue depth to 31, with
   one entry set aside mostly for historical reasons. This didn't use to
   make much difference but Jens found out that modern hard drives can
   actually perform measurably better with the extra one queue depth.
   Jens updated libata core so that it can make use of full 32 queue
   depth

 - Damien updated command retry logic in error handling so that it
   doesn't unnecessarily retry when upper layer (SCSI) is gonna handle
   them

 - A couple misc changes

* 'for-4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata:
  sata_fsl: use the right type for tag bitshift
  ahci: enable full queue depth of 32
  libata: don't clamp queue depth to ATA_MAX_QUEUE - 1
  libata: add extra internal command
  sata_nv: set host can_queue count appropriately
  libata: remove assumption that ATA_MAX_QUEUE - 1 is the max
  libata: use ata_tag_internal() consistently
  libata: bump ->qc_active to a 64-bit type
  libata: convert core and drivers to ->hw_tag usage
  libata: introduce notion of separate hardware tags
  libata: Fix command retry decision
  libata: Honor RQF_QUIET flag
  libata: Make ata_dev_set_mode() less verbose
  libata: Fix ata_err_string()
  libata: Fix comment typo in ata_eh_analyze_tf()
  sata_nv: don't use block layer bounce buffer
  ata: hpt37x: Convert to use match_string() helper
parents 476d9ff6 88e10092
Loading
Loading
Loading
Loading
+2 −2
Original line number Diff line number Diff line
@@ -271,7 +271,7 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
	cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
@@ -294,7 +294,7 @@ static void acard_ahci_qc_prep(struct ata_queued_cmd *qc)
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->tag, opts);
	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
}

static bool acard_ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
+1 −1
Original line number Diff line number Diff line
@@ -390,7 +390,7 @@ extern struct device_attribute *ahci_sdev_attrs[];
 */
#define AHCI_SHT(drv_name)						\
	ATA_NCQ_SHT(drv_name),						\
	.can_queue		= AHCI_MAX_CMDS - 1,			\
	.can_queue		= AHCI_MAX_CMDS,			\
	.sg_tablesize		= AHCI_MAX_SG,				\
	.dma_boundary		= AHCI_DMA_BOUNDARY,			\
	.shost_attrs		= ahci_shost_attrs,			\
+4 −4
Original line number Diff line number Diff line
@@ -1649,7 +1649,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
	 * Fill in command table information.  First, the header,
	 * a SATA Register - Host to Device command FIS.
	 */
	cmd_tbl = pp->cmd_tbl + qc->tag * AHCI_CMD_TBL_SZ;
	cmd_tbl = pp->cmd_tbl + qc->hw_tag * AHCI_CMD_TBL_SZ;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, cmd_tbl);
	if (is_atapi) {
@@ -1670,7 +1670,7 @@ static void ahci_qc_prep(struct ata_queued_cmd *qc)
	if (is_atapi)
		opts |= AHCI_CMD_ATAPI | AHCI_CMD_PREFETCH;

	ahci_fill_cmd_slot(pp, qc->tag, opts);
	ahci_fill_cmd_slot(pp, qc->hw_tag, opts);
}

static void ahci_fbs_dec_intr(struct ata_port *ap)
@@ -2006,7 +2006,7 @@ unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
	pp->active_link = qc->dev->link;

	if (ata_is_ncq(qc->tf.protocol))
		writel(1 << qc->tag, port_mmio + PORT_SCR_ACT);
		writel(1 << qc->hw_tag, port_mmio + PORT_SCR_ACT);

	if (pp->fbs_enabled && pp->fbs_last_dev != qc->dev->link->pmp) {
		u32 fbs = readl(port_mmio + PORT_FBS);
@@ -2016,7 +2016,7 @@ unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
		pp->fbs_last_dev = qc->dev->link->pmp;
	}

	writel(1 << qc->tag, port_mmio + PORT_CMD_ISSUE);
	writel(1 << qc->hw_tag, port_mmio + PORT_CMD_ISSUE);

	ahci_sw_activity(qc->dev->link);

+27 −34
Original line number Diff line number Diff line
@@ -759,7 +759,7 @@ int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
	if (ata_ncq_enabled(dev) && !ata_tag_internal(tag)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;
@@ -1570,8 +1570,9 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	unsigned int preempted_tag;
	u32 preempted_sactive;
	u64 preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
@@ -1587,20 +1588,10 @@ unsigned ata_exec_internal_sg(struct ata_device *dev,
	}

	/* initialize internal qc */
	qc = __ata_qc_from_tag(ap, ATA_TAG_INTERNAL);

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->tag = ATA_TAG_INTERNAL;
	qc->hw_tag = 0;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
@@ -2295,7 +2286,7 @@ static int ata_dev_config_ncq(struct ata_device *dev,
		return 0;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE);
		dev->flags |= ATA_DFLAG_NCQ;
	}

@@ -3573,6 +3564,8 @@ static int ata_dev_set_mode(struct ata_device *dev)
	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	if (!(ehc->i.flags & ATA_EHI_QUIET) ||
	    ehc->i.flags & ATA_EHI_DID_HARDRESET)
		ata_dev_info(dev, "configured for %s%s\n",
			     ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
			     dev_err_whine);
@@ -5133,7 +5126,7 @@ struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev, int tag)
	}

	qc = __ata_qc_from_tag(ap, tag);
	qc->tag = tag;
	qc->tag = qc->hw_tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
@@ -5163,7 +5156,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
	if (ata_tag_valid(tag)) {
		qc->tag = ATA_TAG_POISON;
		if (ap->flags & ATA_FLAG_SAS_HOST)
			ata_sas_free_tag(tag, ap);
@@ -5185,7 +5178,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)

	/* command should be marked inactive atomically with qc completion */
	if (ata_is_ncq(qc->tf.protocol)) {
		link->sactive &= ~(1 << qc->tag);
		link->sactive &= ~(1 << qc->hw_tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
@@ -5203,7 +5196,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);
	ap->qc_active &= ~(1ULL << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
@@ -5360,29 +5353,29 @@ void ata_qc_complete(struct ata_queued_cmd *qc)
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active)
{
	int nr_done = 0;
	u32 done_mask;
	u64 done_mask;

	done_mask = ap->qc_active ^ qc_active;

	if (unlikely(done_mask & qc_active)) {
		ata_port_err(ap, "illegal qc_active transition (%08x->%08x)\n",
		ata_port_err(ap, "illegal qc_active transition (%08llx->%08llx)\n",
			     ap->qc_active, qc_active);
		return -EINVAL;
	}

	while (done_mask) {
		struct ata_queued_cmd *qc;
		unsigned int tag = __ffs(done_mask);
		unsigned int tag = __ffs64(done_mask);

		qc = ata_qc_from_tag(ap, tag);
		if (qc) {
			ata_qc_complete(qc);
			nr_done++;
		}
		done_mask &= ~(1 << tag);
		done_mask &= ~(1ULL << tag);
	}

	return nr_done;
@@ -5413,11 +5406,11 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
	WARN_ON_ONCE(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		WARN_ON_ONCE(link->sactive & (1 << qc->tag));
		WARN_ON_ONCE(link->sactive & (1 << qc->hw_tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
		link->sactive |= 1 << qc->hw_tag;
	} else {
		WARN_ON_ONCE(link->sactive);

@@ -5426,7 +5419,7 @@ void ata_qc_issue(struct ata_queued_cmd *qc)
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;
	ap->qc_active |= 1ULL << qc->tag;

	/*
	 * We guarantee to LLDs that they will have at least one
@@ -6425,7 +6418,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
{
	spin_lock_init(&host->lock);
	mutex_init(&host->eh_mutex);
	host->n_tags = ATA_MAX_QUEUE - 1;
	host->n_tags = ATA_MAX_QUEUE;
	host->dev = dev;
	host->ops = ops;
}
@@ -6507,7 +6500,7 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
	host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE);

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
+46 −10
Original line number Diff line number Diff line
@@ -822,9 +822,12 @@ static int ata_eh_nr_in_flight(struct ata_port *ap)
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		if (ata_tag_internal(tag))
			continue;
		if (ata_qc_from_tag(ap, tag))
			nr++;
	}

	return nr;
}
@@ -849,7 +852,7 @@ void ata_eh_fastdrain_timerfn(struct timer_list *t)
		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
		for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
@@ -1003,7 +1006,8 @@ static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
	/* include internal tag in iteration */
	for (tag = 0; tag <= ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
@@ -1432,6 +1436,10 @@ static const char *ata_err_string(unsigned int err_mask)
		return "invalid argument";
	if (err_mask & AC_ERR_DEV)
		return "device error";
	if (err_mask & AC_ERR_NCQ)
		return "NCQ error";
	if (err_mask & AC_ERR_NODEV_HINT)
		return "Polling detection error";
	return "unknown error";
}

@@ -1815,10 +1823,10 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
	if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
		int ret = scsi_check_sense(qc->scsicmd);
		/*
		 * SUCCESS here means that the sense code could
		 * SUCCESS here means that the sense code could be
		 * evaluated and should be passed to the upper layers
		 * for correct evaluation.
		 * FAILED means the sense code could not interpreted
		 * FAILED means the sense code could not be interpreted
		 * and the device would need to be reset.
		 * NEEDS_RETRY and ADD_TO_MLQUEUE means that the
		 * command would need to be retried.
@@ -2098,6 +2106,21 @@ static inline int ata_eh_worth_retry(struct ata_queued_cmd *qc)
	return qc->err_mask != AC_ERR_DEV;  /* retry if not dev error */
}

/**
 *      ata_eh_quiet - check if we need to be quiet about a command error
 *      @qc: qc to check
 *
 *      Look at the qc flags and its scsi command request flags to determine
 *      if we need to be quiet about the command failure.
 *
 *      If the originating scsi command was issued with RQF_QUIET, propagate
 *      that into the qc by setting ATA_QCFLAG_QUIET, so the suppression
 *      sticks on the qc for the rest of error handling.
 */
static inline bool ata_eh_quiet(struct ata_queued_cmd *qc)
{
	if (qc->scsicmd &&
	    qc->scsicmd->request->rq_flags & RQF_QUIET)
		qc->flags |= ATA_QCFLAG_QUIET;
	return qc->flags & ATA_QCFLAG_QUIET;
}

/**
 *	ata_eh_link_autopsy - analyze error and determine recovery action
 *	@link: host link to perform autopsy on
@@ -2115,7 +2138,7 @@ static void ata_eh_link_autopsy(struct ata_link *link)
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag;
	int tag, nr_failed = 0, nr_quiet = 0;
	u32 serror;
	int rc;

@@ -2167,12 +2190,16 @@ static void ata_eh_link_autopsy(struct ata_link *link)
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/* SENSE_VALID trumps dev/unknown error and revalidation */
		/*
		 * SENSE_VALID trumps dev/unknown error and revalidation. Upper
		 * layers will determine whether the command is worth retrying
		 * based on the sense data and device class/type. Otherwise,
		 * determine directly if the command is worth retrying using its
		 * error mask and flags.
		 */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);

		/* determine whether the command is worth retrying */
		if (ata_eh_worth_retry(qc))
		else if (ata_eh_worth_retry(qc))
			qc->flags |= ATA_QCFLAG_RETRY;

		/* accumulate error info */
@@ -2181,8 +2208,17 @@ static void ata_eh_link_autopsy(struct ata_link *link)
		if (qc->flags & ATA_QCFLAG_IO)
			eflags |= ATA_EFLAG_IS_IO;
		trace_ata_eh_link_autopsy_qc(qc);

		/* Count quiet errors */
		if (ata_eh_quiet(qc))
			nr_quiet++;
		nr_failed++;
	}

	/* If all failed commands requested silence, then be quiet */
	if (nr_quiet == nr_failed)
		ehc->i.flags |= ATA_EHI_QUIET;

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
Loading