Diffstat (limited to 'drivers/s390/block')
-rw-r--r--  drivers/s390/block/dasd.c           149
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c    2
-rw-r--r--  drivers/s390/block/dasd_ioctl.c       1
3 files changed, 63 insertions, 89 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index c1cf277d0..1161bbd7e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -725,18 +725,20 @@ static void dasd_profile_start(struct dasd_block *block,
* we count each request only once.
*/
device = cqr->startdev;
- if (device->profile.data) {
- counter = 1; /* request is not yet queued on the start device */
- list_for_each(l, &device->ccw_queue)
- if (++counter >= 31)
- break;
- }
+ if (!device->profile.data)
+ return;
+
+ spin_lock(get_ccwdev_lock(device->cdev));
+ counter = 1; /* request is not yet queued on the start device */
+ list_for_each(l, &device->ccw_queue)
+ if (++counter >= 31)
+ break;
+ spin_unlock(get_ccwdev_lock(device->cdev));
+
spin_lock(&device->profile.lock);
- if (device->profile.data) {
- device->profile.data->dasd_io_nr_req[counter]++;
- if (rq_data_dir(req) == READ)
- device->profile.data->dasd_read_nr_req[counter]++;
- }
+ device->profile.data->dasd_io_nr_req[counter]++;
+ if (rq_data_dir(req) == READ)
+ device->profile.data->dasd_read_nr_req[counter]++;
spin_unlock(&device->profile.lock);
}
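
The hunk above turns the nested profile.data checks into an early return and samples the queue depth under the ccw device lock, so the histogram bucket (capped at 31) is computed against a stable ccw_queue before the counters are updated under the separate profile lock. A minimal userspace sketch of that locking shape, with pthread mutexes standing in for the kernel locks; all names here are illustrative, not the driver's:

#include <pthread.h>

#define NR_BUCKETS 32

struct profile_sketch {
	pthread_mutex_t queue_lock;	/* stands in for get_ccwdev_lock() */
	pthread_mutex_t profile_lock;	/* stands in for profile.lock */
	int queue_len;			/* stands in for walking ccw_queue */
	int enabled;			/* stands in for profile.data != NULL */
	unsigned long io_nr_req[NR_BUCKETS];
};

static void profile_start(struct profile_sketch *p)
{
	int counter;

	if (!p->enabled)
		return;			/* guard clause, as in the hunk */

	/* sample the queue depth under the queue lock, capped at 31 */
	pthread_mutex_lock(&p->queue_lock);
	counter = p->queue_len + 1;	/* request is not yet queued */
	if (counter > 31)
		counter = 31;
	pthread_mutex_unlock(&p->queue_lock);

	/* account under the separate profile lock */
	pthread_mutex_lock(&p->profile_lock);
	p->io_nr_req[counter]++;
	pthread_mutex_unlock(&p->profile_lock);
}

int main(void)
{
	struct profile_sketch p = {
		.queue_lock = PTHREAD_MUTEX_INITIALIZER,
		.profile_lock = PTHREAD_MUTEX_INITIALIZER,
		.queue_len = 3,
		.enabled = 1,
	};

	profile_start(&p);	/* accounts one request in bucket 4 */
	return 0;
}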
@@ -2826,41 +2828,32 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data)
* Requeue a request back to the block request queue
* only works for block requests
*/
-static int _dasd_requeue_request(struct dasd_ccw_req *cqr)
+static void _dasd_requeue_request(struct dasd_ccw_req *cqr)
{
- struct dasd_block *block = cqr->block;
struct request *req;
- if (!block)
- return -EINVAL;
/*
* If the request is an ERP request there is nothing to requeue.
* This will be done with the remaining original request.
*/
if (cqr->refers)
- return 0;
+ return;
spin_lock_irq(&cqr->dq->lock);
req = (struct request *) cqr->callback_data;
blk_mq_requeue_request(req, true);
spin_unlock_irq(&cqr->dq->lock);
- return 0;
+ return;
}
-/*
- * Go through all request on the dasd_block request queue, cancel them
- * on the respective dasd_device, and return them to the generic
- * block layer.
- */
-static int dasd_flush_block_queue(struct dasd_block *block)
+static int _dasd_requests_to_flushqueue(struct dasd_block *block,
+ struct list_head *flush_queue)
{
struct dasd_ccw_req *cqr, *n;
- int rc, i;
- struct list_head flush_queue;
unsigned long flags;
+ int rc, i;
- INIT_LIST_HEAD(&flush_queue);
- spin_lock_bh(&block->queue_lock);
+ spin_lock_irqsave(&block->queue_lock, flags);
rc = 0;
restart:
list_for_each_entry_safe(cqr, n, &block->ccw_queue, blocklist) {
@@ -2875,13 +2868,32 @@ restart:
* is returned from the dasd_device layer.
*/
cqr->callback = _dasd_wake_block_flush_cb;
- for (i = 0; cqr != NULL; cqr = cqr->refers, i++)
- list_move_tail(&cqr->blocklist, &flush_queue);
+ for (i = 0; cqr; cqr = cqr->refers, i++)
+ list_move_tail(&cqr->blocklist, flush_queue);
if (i > 1)
/* moved more than one request - need to restart */
goto restart;
}
- spin_unlock_bh(&block->queue_lock);
+ spin_unlock_irqrestore(&block->queue_lock, flags);
+
+ return rc;
+}
+
+/*
+ * Go through all request on the dasd_block request queue, cancel them
+ * on the respective dasd_device, and return them to the generic
+ * block layer.
+ */
+static int dasd_flush_block_queue(struct dasd_block *block)
+{
+ struct dasd_ccw_req *cqr, *n;
+ struct list_head flush_queue;
+ unsigned long flags;
+ int rc;
+
+ INIT_LIST_HEAD(&flush_queue);
+ rc = _dasd_requests_to_flushqueue(block, &flush_queue);
+
/* Now call the callback function of flushed requests */
restart_cb:
list_for_each_entry_safe(cqr, n, &flush_queue, blocklist) {
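
The helper factored out in this hunk keeps the restart dance for a reason: safe list iteration only tolerates removal of the current entry, but moving a whole ERP chain through cqr->refers can also unlink the prefetched next entry, leaving the walk pointing into the flush queue. A self-contained userspace sketch of why i > 1 forces the goto restart, with a minimal stand-in for the kernel list API (all names illustrative):

#include <stdio.h>
#include <stddef.h>

/* tiny stand-in for the kernel's circular doubly linked list */
struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

static void list_add_tail(struct list_head *e, struct list_head *h)
{
	e->prev = h->prev;
	e->next = h;
	h->prev->next = e;
	h->prev = e;
}

static void list_move_tail(struct list_head *e, struct list_head *h)
{
	list_del(e);
	list_add_tail(e, h);
}

struct cqr {
	struct list_head blocklist;
	struct cqr *refers;	/* older request this ERP request refers to */
	int id;
};

#define to_cqr(p) ((struct cqr *)((char *)(p) - offsetof(struct cqr, blocklist)))

/*
 * Move each request and its whole ERP chain to @flush.  The prefetched
 * next pointer may itself be part of the chain that just moved, so any
 * pass that moved more than one entry must restart from the top.
 */
static void to_flushqueue(struct list_head *queue, struct list_head *flush)
{
	struct list_head *p, *n;
	struct cqr *cqr;
	int i;

restart:
	for (p = queue->next, n = p->next; p != queue; p = n, n = p->next) {
		cqr = to_cqr(p);
		for (i = 0; cqr; cqr = cqr->refers, i++)
			list_move_tail(&cqr->blocklist, flush);
		if (i > 1)
			goto restart;	/* n may no longer be on @queue */
	}
}

int main(void)
{
	struct list_head queue, flush, *p;
	struct cqr orig = { .refers = NULL, .id = 1 };
	struct cqr erp  = { .refers = &orig, .id = 2 };	/* ERP for orig */

	list_init(&queue);
	list_init(&flush);
	list_add_tail(&erp.blocklist, &queue);	/* ERP sits before original */
	list_add_tail(&orig.blocklist, &queue);

	to_flushqueue(&queue, &flush);
	for (p = flush.next; p != &flush; p = p->next)
		printf("flushed request %d\n", to_cqr(p)->id);
	return 0;
}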
@@ -3832,75 +3844,36 @@ EXPORT_SYMBOL_GPL(dasd_generic_verify_path);
*/
static int dasd_generic_requeue_all_requests(struct dasd_device *device)
{
+ struct dasd_block *block = device->block;
struct list_head requeue_queue;
struct dasd_ccw_req *cqr, *n;
- struct dasd_ccw_req *refers;
int rc;
- INIT_LIST_HEAD(&requeue_queue);
- spin_lock_irq(get_ccwdev_lock(device->cdev));
- rc = 0;
- list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) {
- /* Check status and move request to flush_queue */
- if (cqr->status == DASD_CQR_IN_IO) {
- rc = device->discipline->term_IO(cqr);
- if (rc) {
- /* unable to terminate requeust */
- dev_err(&device->cdev->dev,
- "Unable to terminate request %p "
- "on suspend\n", cqr);
- spin_unlock_irq(get_ccwdev_lock(device->cdev));
- dasd_put_device(device);
- return rc;
- }
- }
- list_move_tail(&cqr->devlist, &requeue_queue);
- }
- spin_unlock_irq(get_ccwdev_lock(device->cdev));
-
- list_for_each_entry_safe(cqr, n, &requeue_queue, devlist) {
- wait_event(dasd_flush_wq,
- (cqr->status != DASD_CQR_CLEAR_PENDING));
+ if (!block)
+ return 0;
- /*
- * requeue requests to blocklayer will only work
- * for block device requests
- */
- if (_dasd_requeue_request(cqr))
- continue;
+ INIT_LIST_HEAD(&requeue_queue);
+ rc = _dasd_requests_to_flushqueue(block, &requeue_queue);
- /* remove requests from device and block queue */
- list_del_init(&cqr->devlist);
- while (cqr->refers != NULL) {
- refers = cqr->refers;
- /* remove the request from the block queue */
- list_del(&cqr->blocklist);
- /* free the finished erp request */
- dasd_free_erp_request(cqr, cqr->memdev);
- cqr = refers;
+ /* Now call the callback function of flushed requests */
+restart_cb:
+ list_for_each_entry_safe(cqr, n, &requeue_queue, blocklist) {
+ wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
+ /* Process finished ERP request. */
+ if (cqr->refers) {
+ spin_lock_bh(&block->queue_lock);
+ __dasd_process_erp(block->base, cqr);
+ spin_unlock_bh(&block->queue_lock);
+ /* restart list_for_xx loop since dasd_process_erp
+ * might remove multiple elements
+ */
+ goto restart_cb;
}
-
- /*
- * _dasd_requeue_request already checked for a valid
- * blockdevice, no need to check again
- * all erp requests (cqr->refers) have a cqr->block
- * pointer copy from the original cqr
- */
+ _dasd_requeue_request(cqr);
list_del_init(&cqr->blocklist);
cqr->block->base->discipline->free_cp(
cqr, (struct request *) cqr->callback_data);
}
-
- /*
- * if requests remain then they are internal request
- * and go back to the device queue
- */
- if (!list_empty(&requeue_queue)) {
- /* move freeze_queue to start of the ccw_queue */
- spin_lock_irq(get_ccwdev_lock(device->cdev));
- list_splice_tail(&requeue_queue, &device->ccw_queue);
- spin_unlock_irq(get_ccwdev_lock(device->cdev));
- }
dasd_schedule_device_bh(device);
return rc;
}
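
The new wait condition, cqr->status < DASD_CQR_QUEUED, leans on how dasd_int.h encodes request states: final states occupy low values, while device-queue states (QUEUED, IN_IO, CLEAR_PENDING, ...) start at 0x80, so "less than QUEUED" means the request is no longer owned by the device. A sketch of that ordering, with values recalled from dasd_int.h and intended as illustration rather than the authoritative list:

/* final states -- below the device-queue range */
#define DASD_CQR_FILLED		0x00	/* ready to be processed */
#define DASD_CQR_DONE		0x01	/* completed successfully */
#define DASD_CQR_NEED_ERP	0x02	/* needs error recovery */
#define DASD_CQR_TERMINATED	0x03	/* stopped by the driver */
#define DASD_CQR_FAILED		0x04	/* finally failed */

/* device-queue states -- 0x80 and up */
#define DASD_CQR_QUEUED		0x80	/* on the device queue */
#define DASD_CQR_IN_IO		0x81	/* currently performing I/O */
#define DASD_CQR_CLEAR_PENDING	0x83	/* clear request issued */

/* the "left the device" test used by the requeue loop above */
static inline int cqr_left_device(unsigned char status)
{
	return status < DASD_CQR_QUEUED;
}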
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index ee73b0607..8598c792d 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -2436,7 +2436,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
erp->block = cqr->block;
erp->magic = cqr->magic;
erp->expires = cqr->expires;
- erp->retries = 256;
+ erp->retries = device->default_retries;
erp->buildclk = get_tod_clock();
erp->status = DASD_CQR_FILLED;
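
With this one-liner the ERP request inherits the per-device retry count instead of a hardcoded 256. A minimal sketch of the intent, assuming default_retries is seeded from the driver-wide DASD_RETRIES constant during device setup; the setup function here is hypothetical:

#define DASD_RETRIES 256	/* historical driver default, cf. dasd_int.h */

struct dasd_device_sketch {
	unsigned int default_retries;
};

static void device_setup(struct dasd_device_sketch *dev)
{
	/* one tunable spot; dasd_3990_erp_add_erp() now inherits it */
	dev->default_retries = DASD_RETRIES;
}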
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 2016e0ed5..9f3f48313 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -137,6 +137,7 @@ static int dasd_ioctl_resume(struct dasd_block *block)
spin_unlock_irqrestore(get_ccwdev_lock(base->cdev), flags);
dasd_schedule_block_bh(block);
+ dasd_schedule_device_bh(base);
return 0;
}
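
The added dasd_schedule_device_bh() makes resume kick both layers: requests parked on the device-level ccw_queue while quiesced are restarted along with the block-layer queue, which one bottom half alone would miss. A runnable userspace analogy of why both kicks are needed (stand-in functions, illustrative only):

#include <stdio.h>

/* userspace stand-ins for the two bottom halves */
static void schedule_block_bh(void)  { puts("kick block-layer queue"); }
static void schedule_device_bh(void) { puts("kick device-level queue"); }

/*
 * Resume must kick both queues: requests requeued to the device-level
 * queue while quiesced are not reachable from the block queue alone.
 */
static int resume(void)
{
	schedule_block_bh();
	schedule_device_bh();	/* the call this patch adds */
	return 0;
}

int main(void) { return resume(); }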