Commit 5f384338 authored by Michael Holzheu's avatar Michael Holzheu Committed by Linus Torvalds
Browse files

[PATCH] s390: fix endless retry loop in tape driver



If a tape device is assigned to another host, the interrupt for the assign
operation comes back with deferred condition code 1.  Under some conditions
this can lead to an endless loop of retries.  In the deferred condition code
handling, check whether the current request is still in I/O and prevent
retries when the request has already been cancelled.

Signed-off-by: default avatarMichael Holzheu <holzheu@de.ibm.com>
Signed-off-by: default avatarMartin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: default avatarAndrew Morton <akpm@osdl.org>
Signed-off-by: default avatarLinus Torvalds <torvalds@osdl.org>
parent 4cd190a7
Loading
Loading
Loading
Loading
+1 −0
Original line number Diff line number Diff line
@@ -250,6 +250,7 @@ extern void tape_free_request(struct tape_request *);
extern int tape_do_io(struct tape_device *, struct tape_request *);
extern int tape_do_io_async(struct tape_device *, struct tape_request *);
extern int tape_do_io_interruptible(struct tape_device *, struct tape_request *);
extern int tape_cancel_io(struct tape_device *, struct tape_request *);
void tape_hotplug_event(struct tape_device *, int major, int action);

static inline int
+27 −5
Original line number Diff line number Diff line
@@ -761,6 +761,13 @@ __tape_start_next_request(struct tape_device *device)
		 */
		if (request->status == TAPE_REQUEST_IN_IO)
			return;
		/*
		 * Request has already been stopped. We have to wait until
		 * the request is removed from the queue in the interrupt
		 * handling.
		 */
		if (request->status == TAPE_REQUEST_DONE)
			return;

		/*
		 * We wanted to cancel the request but the common I/O layer
@@ -1023,6 +1030,20 @@ tape_do_io_interruptible(struct tape_device *device,
	return rc;
}

/*
 * Cancel the channel program currently running for a request.
 * Takes the ccw device lock so the cancel cannot race with the
 * interrupt handler; returns the result of __tape_cancel_io().
 */
int
tape_cancel_io(struct tape_device *device, struct tape_request *request)
{
	int ret;

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	ret = __tape_cancel_io(device, request);
	spin_unlock_irq(get_ccwdev_lock(device->cdev));

	return ret;
}

/*
 * Tape interrupt routine, called from the ccw_device layer
 */
@@ -1068,12 +1089,12 @@ __tape_do_irq (struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
	 * error might still apply. So we just schedule the request to be
	 * started later.
	 */
	if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC)) {
		PRINT_WARN("(%s): deferred cc=%i. restaring\n",
			cdev->dev.bus_id,
			irb->scsw.cc);
	if (irb->scsw.cc != 0 && (irb->scsw.fctl & SCSW_FCTL_START_FUNC) &&
	    (request->status == TAPE_REQUEST_IN_IO)) {
		DBF_EVENT(3,"(%08x): deferred cc=%i, fctl=%i. restarting\n",
			device->cdev_id, irb->scsw.cc, irb->scsw.fctl);
		request->status = TAPE_REQUEST_QUEUED;
		schedule_work(&device->tape_dnr);
		schedule_delayed_work(&device->tape_dnr, HZ);
		return;
	}

@@ -1287,4 +1308,5 @@ EXPORT_SYMBOL(tape_dump_sense_dbf);
EXPORT_SYMBOL(tape_do_io);
EXPORT_SYMBOL(tape_do_io_async);
EXPORT_SYMBOL(tape_do_io_interruptible);
EXPORT_SYMBOL(tape_cancel_io);
EXPORT_SYMBOL(tape_mtop);
+7 −8
Original line number Diff line number Diff line
@@ -37,20 +37,19 @@ tape_std_assign_timeout(unsigned long data)
{
	struct tape_request *	request;
	struct tape_device *	device;
	int rc;

	request = (struct tape_request *) data;
	if ((device = request->device) == NULL)
		BUG();

	spin_lock_irq(get_ccwdev_lock(device->cdev));
	if (request->callback != NULL) {
	DBF_EVENT(3, "%08x: Assignment timeout. Device busy.\n",
			device->cdev_id);
		PRINT_ERR("%s: Assignment timeout. Device busy.\n",
			device->cdev->dev.bus_id);
		ccw_device_clear(device->cdev, (long) request);
	}
	spin_unlock_irq(get_ccwdev_lock(device->cdev));
	rc = tape_cancel_io(device, request);
	if(rc)
		PRINT_ERR("(%s): Assign timeout: Cancel failed with rc = %i\n",
			device->cdev->dev.bus_id, rc);

}

int