Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd.c  139
-rw-r--r--  drivers/s390/block/dasd_3990_erp.c  94
-rw-r--r--  drivers/s390/block/dasd_alias.c  14
-rw-r--r--  drivers/s390/block/dasd_devmap.c  34
-rw-r--r--  drivers/s390/block/dasd_diag.c  26
-rw-r--r--  drivers/s390/block/dasd_eckd.c  304
-rw-r--r--  drivers/s390/block/dasd_eer.c  9
-rw-r--r--  drivers/s390/block/dasd_erp.c  9
-rw-r--r--  drivers/s390/block/dasd_fba.c  120
-rw-r--r--  drivers/s390/block/dasd_genhd.c  54
-rw-r--r--  drivers/s390/block/dasd_int.h  37
-rw-r--r--  drivers/s390/block/dasd_ioctl.c  8
-rw-r--r--  drivers/s390/block/dasd_proc.c  5
-rw-r--r--  drivers/s390/block/dcssblk.c  23
-rw-r--r--  drivers/s390/block/scm_blk.c  23
-rw-r--r--  drivers/s390/char/con3215.c  4
-rw-r--r--  drivers/s390/char/fs3270.c  14
-rw-r--r--  drivers/s390/char/raw3270.c  44
-rw-r--r--  drivers/s390/char/raw3270.h  2
-rw-r--r--  drivers/s390/char/sclp_cmd.c  44
-rw-r--r--  drivers/s390/char/tape.h  12
-rw-r--r--  drivers/s390/char/tape_class.c  17
-rw-r--r--  drivers/s390/char/vmlogrdr.c  18
-rw-r--r--  drivers/s390/char/vmur.c  22
-rw-r--r--  drivers/s390/char/zcore.c  1
-rw-r--r--  drivers/s390/cio/ccwgroup.c  8
-rw-r--r--  drivers/s390/cio/chsc.c  16
-rw-r--r--  drivers/s390/cio/chsc.h  6
-rw-r--r--  drivers/s390/cio/chsc_sch.c  20
-rw-r--r--  drivers/s390/cio/cio.c  4
-rw-r--r--  drivers/s390/cio/cmf.c  6
-rw-r--r--  drivers/s390/cio/css.c  29
-rw-r--r--  drivers/s390/cio/device.c  4
-rw-r--r--  drivers/s390/cio/device_fsm.c  13
-rw-r--r--  drivers/s390/cio/device_id.c  2
-rw-r--r--  drivers/s390/cio/device_ops.c  13
-rw-r--r--  drivers/s390/cio/device_pgid.c  8
-rw-r--r--  drivers/s390/cio/device_status.c  2
-rw-r--r--  drivers/s390/cio/eadm_sch.c  4
-rw-r--r--  drivers/s390/cio/fcx.c  22
-rw-r--r--  drivers/s390/cio/orb.h  9
-rw-r--r--  drivers/s390/cio/qdio_main.c  12
-rw-r--r--  drivers/s390/cio/qdio_setup.c  10
-rw-r--r--  drivers/s390/cio/qdio_thinint.c  6
-rw-r--r--  drivers/s390/cio/scm.c  4
-rw-r--r--  drivers/s390/cio/vfio_ccw_cp.c  82
-rw-r--r--  drivers/s390/cio/vfio_ccw_fsm.c  2
-rw-r--r--  drivers/s390/crypto/ap_bus.c  267
-rw-r--r--  drivers/s390/crypto/ap_bus.h  8
-rw-r--r--  drivers/s390/crypto/ap_debug.h  4
-rw-r--r--  drivers/s390/crypto/ap_queue.c  31
-rw-r--r--  drivers/s390/crypto/pkey_api.c  226
-rw-r--r--  drivers/s390/crypto/vfio_ap_drv.c  2
-rw-r--r--  drivers/s390/crypto/vfio_ap_ops.c  35
-rw-r--r--  drivers/s390/crypto/zcrypt_api.c  265
-rw-r--r--  drivers/s390/crypto/zcrypt_api.h  9
-rw-r--r--  drivers/s390/crypto/zcrypt_ccamisc.c  220
-rw-r--r--  drivers/s390/crypto/zcrypt_debug.h  4
-rw-r--r--  drivers/s390/crypto/zcrypt_ep11misc.c  173
-rw-r--r--  drivers/s390/crypto/zcrypt_error.h  5
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype50.c  14
-rw-r--r--  drivers/s390/crypto/zcrypt_msgtype6.c  45
-rw-r--r--  drivers/s390/net/ctcm_fsms.c  4
-rw-r--r--  drivers/s390/net/ctcm_main.c  2
-rw-r--r--  drivers/s390/net/ctcm_mpc.c  20
-rw-r--r--  drivers/s390/net/fsm.c  2
-rw-r--r--  drivers/s390/net/lcs.c  12
-rw-r--r--  drivers/s390/net/qeth_core.h  9
-rw-r--r--  drivers/s390/net/qeth_core_main.c  24
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c  2
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c  4
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.h  6
-rw-r--r--  drivers/s390/virtio/virtio_ccw.c  170
73 files changed, 1490 insertions(+), 1432 deletions(-)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 30851faade..0a97cfedd7 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -27,9 +27,6 @@
#include <asm/itcw.h>
#include <asm/diag.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd:"
-
#include "dasd_int.h"
/*
* SECTION: Constant definitions to be used within this file
@@ -310,39 +307,57 @@ static int dasd_state_basic_to_known(struct dasd_device *device)
*/
static int dasd_state_basic_to_ready(struct dasd_device *device)
{
- int rc;
- struct dasd_block *block;
- struct gendisk *disk;
+ struct dasd_block *block = device->block;
+ struct queue_limits lim;
+ int rc = 0;
- rc = 0;
- block = device->block;
/* make disk known with correct capacity */
- if (block) {
- if (block->base->discipline->do_analysis != NULL)
- rc = block->base->discipline->do_analysis(block);
- if (rc) {
- if (rc != -EAGAIN) {
- device->state = DASD_STATE_UNFMT;
- disk = device->block->gdp;
- kobject_uevent(&disk_to_dev(disk)->kobj,
- KOBJ_CHANGE);
- goto out;
- }
- return rc;
- }
- if (device->discipline->setup_blk_queue)
- device->discipline->setup_blk_queue(block);
- set_capacity(block->gdp,
- block->blocks << block->s2b_shift);
+ if (!block) {
device->state = DASD_STATE_READY;
- rc = dasd_scan_partitions(block);
- if (rc) {
- device->state = DASD_STATE_BASIC;
+ goto out;
+ }
+
+ if (block->base->discipline->do_analysis != NULL)
+ rc = block->base->discipline->do_analysis(block);
+ if (rc) {
+ if (rc == -EAGAIN)
return rc;
- }
- } else {
- device->state = DASD_STATE_READY;
+ device->state = DASD_STATE_UNFMT;
+ kobject_uevent(&disk_to_dev(device->block->gdp)->kobj,
+ KOBJ_CHANGE);
+ goto out;
+ }
+
+ lim = queue_limits_start_update(block->gdp->queue);
+ lim.max_dev_sectors = device->discipline->max_sectors(block);
+ lim.max_hw_sectors = lim.max_dev_sectors;
+ lim.logical_block_size = block->bp_block;
+
+ if (device->discipline->has_discard) {
+ unsigned int max_bytes;
+
+ lim.discard_granularity = block->bp_block;
+
+ /* Calculate max_discard_sectors and make it PAGE aligned */
+ max_bytes = USHRT_MAX * block->bp_block;
+ max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
+
+ lim.max_hw_discard_sectors = max_bytes / block->bp_block;
+ lim.max_write_zeroes_sectors = lim.max_hw_discard_sectors;
+ }
+ rc = queue_limits_commit_update(block->gdp->queue, &lim);
+ if (rc)
+ return rc;
+
+ set_capacity(block->gdp, block->blocks << block->s2b_shift);
+ device->state = DASD_STATE_READY;
+
+ rc = dasd_scan_partitions(block);
+ if (rc) {
+ device->state = DASD_STATE_BASIC;
+ return rc;
}
+
out:
if (device->discipline->basic_to_ready)
rc = device->discipline->basic_to_ready(device);
@@ -409,7 +424,7 @@ dasd_state_ready_to_online(struct dasd_device * device)
KOBJ_CHANGE);
return 0;
}
- disk_uevent(device->block->bdev_handle->bdev->bd_disk,
+ disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
KOBJ_CHANGE);
}
return 0;
@@ -430,7 +445,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device)
device->state = DASD_STATE_READY;
if (device->block && !(device->features & DASD_FEATURE_USERAW))
- disk_uevent(device->block->bdev_handle->bdev->bd_disk,
+ disk_uevent(file_bdev(device->block->bdev_file)->bd_disk,
KOBJ_CHANGE);
return 0;
}
@@ -1298,7 +1313,6 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
int retries, rc;
- char errorstring[ERRORLENGTH];
/* Check the cqr */
rc = dasd_check_cqr(cqr);
@@ -1337,10 +1351,8 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
rc = 0;
break;
default:
- /* internal error 10 - unknown rc*/
- snprintf(errorstring, ERRORLENGTH, "10 %d", rc);
- dev_err(&device->cdev->dev, "An error occurred in the "
- "DASD device driver, reason=%s\n", errorstring);
+ dev_err(&device->cdev->dev,
+ "Unexpected error during request termination %d\n", rc);
BUG();
break;
}
@@ -1359,7 +1371,6 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
{
struct dasd_device *device;
int rc;
- char errorstring[ERRORLENGTH];
/* Check the cqr */
rc = dasd_check_cqr(cqr);
@@ -1379,10 +1390,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
return -EPERM;
}
if (cqr->retries < 0) {
- /* internal error 14 - start_IO run out of retries */
- sprintf(errorstring, "14 %p", cqr);
- dev_err(&device->cdev->dev, "An error occurred in the DASD "
- "device driver, reason=%s\n", errorstring);
+ dev_err(&device->cdev->dev,
+ "Start I/O ran out of retries\n");
cqr->status = DASD_CQR_ERROR;
return -EIO;
}
@@ -1460,11 +1469,8 @@ int dasd_start_IO(struct dasd_ccw_req *cqr)
"not accessible");
break;
default:
- /* internal error 11 - unknown rc */
- snprintf(errorstring, ERRORLENGTH, "11 %d", rc);
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", errorstring);
+ "Unexpected error during request start %d", rc);
BUG();
break;
}
@@ -1901,8 +1907,6 @@ static void __dasd_device_process_ccw_queue(struct dasd_device *device,
static void __dasd_process_cqr(struct dasd_device *device,
struct dasd_ccw_req *cqr)
{
- char errorstring[ERRORLENGTH];
-
switch (cqr->status) {
case DASD_CQR_SUCCESS:
cqr->status = DASD_CQR_DONE;
@@ -1914,11 +1918,8 @@ static void __dasd_process_cqr(struct dasd_device *device,
cqr->status = DASD_CQR_TERMINATED;
break;
default:
- /* internal error 12 - wrong cqr status*/
- snprintf(errorstring, ERRORLENGTH, "12 %p %x02", cqr, cqr->status);
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", errorstring);
+ "Unexpected CQR status %02x", cqr->status);
BUG();
}
if (cqr->callback)
@@ -1983,16 +1984,14 @@ static void __dasd_device_check_expire(struct dasd_device *device)
if (device->discipline->term_IO(cqr) != 0) {
/* Hmpf, try again in 5 sec */
dev_err(&device->cdev->dev,
- "cqr %p timed out (%lus) but cannot be "
- "ended, retrying in 5 s\n",
- cqr, (cqr->expires/HZ));
+ "CQR timed out (%lus) but cannot be ended, retrying in 5s\n",
+ (cqr->expires / HZ));
cqr->expires += 5*HZ;
dasd_device_set_timer(device, 5*HZ);
} else {
dev_err(&device->cdev->dev,
- "cqr %p timed out (%lus), %i retries "
- "remaining\n", cqr, (cqr->expires/HZ),
- cqr->retries);
+ "CQR timed out (%lus), %i retries remaining\n",
+ (cqr->expires / HZ), cqr->retries);
}
__dasd_device_check_autoquiesce_timeout(device, cqr);
}
@@ -2113,8 +2112,7 @@ int dasd_flush_device_queue(struct dasd_device *device)
if (rc) {
/* unable to terminate requeust */
dev_err(&device->cdev->dev,
- "Flushing the DASD request queue "
- "failed for request %p\n", cqr);
+ "Flushing the DASD request queue failed\n");
/* stop flush processing */
goto finished;
}
@@ -2630,8 +2628,7 @@ static int __dasd_cancel_req(struct dasd_ccw_req *cqr)
rc = device->discipline->term_IO(cqr);
if (rc) {
dev_err(&device->cdev->dev,
- "Cancelling request %p failed with rc=%d\n",
- cqr, rc);
+ "Cancelling request failed with rc=%d\n", rc);
} else {
cqr->stopclk = get_tod_clock();
}
@@ -3584,7 +3581,7 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* in the other openers.
*/
if (device->block) {
- max_count = device->block->bdev_handle ? 0 : -1;
+ max_count = device->block->bdev_file ? 0 : -1;
open_count = atomic_read(&device->block->open_count);
if (open_count > max_count) {
if (open_count > 0)
@@ -3629,8 +3626,8 @@ int dasd_generic_set_offline(struct ccw_device *cdev)
* so sync bdev first and then wait for our queues to become
* empty
*/
- if (device->block && device->block->bdev_handle)
- bdev_mark_dead(device->block->bdev_handle->bdev, false);
+ if (device->block && device->block->bdev_file)
+ bdev_mark_dead(file_bdev(device->block->bdev_file), false);
dasd_schedule_device_bh(device);
rc = wait_event_interruptible(shutdown_waitq,
_wait_for_empty_queues(device));
@@ -3972,16 +3969,14 @@ static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device,
NULL);
if (IS_ERR(cqr)) {
- /* internal error 13 - Allocating the RDC request failed*/
- dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", "13");
+ DBF_EVENT_DEVID(DBF_WARNING, device->cdev, "%s",
+ "Could not allocate RDC request");
return cqr;
}
ccw = cqr->cpaddr;
ccw->cmd_code = CCW_CMD_RDC;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
ccw->flags = 0;
ccw->count = rdc_buffer_size;
cqr->startdev = device;
@@ -4025,7 +4020,7 @@ char *dasd_get_sense(struct irb *irb)
if (scsw_is_tm(&irb->scsw) && (irb->scsw.tm.fcxs == 0x01)) {
if (irb->scsw.tm.tcw)
- tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
+ tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw));
if (tsb && tsb->length == 64 && tsb->flags)
switch (tsb->flags & 0x07) {
case 1: /* tsa_iostat */
diff --git a/drivers/s390/block/dasd_3990_erp.c b/drivers/s390/block/dasd_3990_erp.c
index 89957bb724..bbbacfc386 100644
--- a/drivers/s390/block/dasd_3990_erp.c
+++ b/drivers/s390/block/dasd_3990_erp.c
@@ -7,13 +7,9 @@
*
*/
-#define KMSG_COMPONENT "dasd-eckd"
-
#include <linux/timer.h>
#include <asm/idals.h>
-#define PRINTK_HEADER "dasd_erp(3990): "
-
#include "dasd_int.h"
#include "dasd_eckd.h"
@@ -220,7 +216,7 @@ dasd_3990_erp_DCTL(struct dasd_ccw_req * erp, char modifier)
memset(ccw, 0, sizeof(struct ccw1));
ccw->cmd_code = CCW_CMD_DCTL;
ccw->count = 4;
- ccw->cda = (__u32)virt_to_phys(DCTL_data);
+ ccw->cda = virt_to_dma32(DCTL_data);
dctl_cqr->flags = erp->flags;
dctl_cqr->function = dasd_3990_erp_DCTL;
dctl_cqr->refers = erp;
@@ -398,7 +394,6 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
struct dasd_device *device = erp->startdev;
char msg_format = (sense[7] & 0xF0);
char msg_no = (sense[7] & 0x0F);
- char errorstring[ERRORLENGTH];
switch (msg_format) {
case 0x00: /* Format 0 - Program or System Checks */
@@ -1004,12 +999,9 @@ dasd_3990_handle_env_data(struct dasd_ccw_req * erp, char *sense)
}
break;
- default: /* unknown message format - should not happen
- internal error 03 - unknown message format */
- snprintf(errorstring, ERRORLENGTH, "03 %x02", msg_format);
+ default:
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", errorstring);
+ "Unknown message format %02x", msg_format);
break;
} /* end switch message format */
@@ -1056,11 +1048,9 @@ dasd_3990_erp_com_rej(struct dasd_ccw_req * erp, char *sense)
set_bit(DASD_CQR_SUPPRESS_CR, &erp->refers->flags);
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
} else {
- /* fatal error - set status to FAILED
- internal error 09 - Command Reject */
if (!test_bit(DASD_CQR_SUPPRESS_CR, &erp->flags))
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, reason=09\n");
+ "An I/O command request was rejected\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
}
@@ -1128,13 +1118,7 @@ dasd_3990_erp_equip_check(struct dasd_ccw_req * erp, char *sense)
erp->function = dasd_3990_erp_equip_check;
if (sense[1] & SNS1_WRITE_INHIBITED) {
- dev_info(&device->cdev->dev,
- "Write inhibited path encountered\n");
-
- /* vary path offline
- internal error 04 - Path should be varied off-line.*/
- dev_err(&device->cdev->dev, "An error occurred in the DASD "
- "device driver, reason=%s\n", "04");
+ dev_err(&device->cdev->dev, "Write inhibited path encountered\n");
erp = dasd_3990_erp_action_1(erp);
@@ -1285,11 +1269,7 @@ dasd_3990_erp_inv_format(struct dasd_ccw_req * erp, char *sense)
erp = dasd_3990_erp_action_4(erp, sense);
} else {
- /* internal error 06 - The track format is not valid*/
- dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", "06");
-
+ dev_err(&device->cdev->dev, "Track format is not valid\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
}
@@ -1609,7 +1589,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
{
struct dasd_device *device = default_erp->startdev;
- __u32 cpa = 0;
+ dma32_t cpa = 0;
struct dasd_ccw_req *cqr;
struct dasd_ccw_req *erp;
struct DE_eckd_data *DE_data;
@@ -1663,9 +1643,8 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
sizeof(struct LO_eckd_data), device);
if (IS_ERR(erp)) {
- /* internal error 01 - Unable to allocate ERP */
- dev_err(&device->cdev->dev, "An error occurred in the DASD "
- "device driver, reason=%s\n", "01");
+ DBF_DEV_EVENT(DBF_ERR, device, "%s",
+ "Unable to allocate ERP request (1B 32)");
return dasd_3990_erp_cleanup(default_erp, DASD_CQR_FAILED);
}
@@ -1714,7 +1693,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
ccw->flags = CCW_FLAG_CC;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(DE_data);
+ ccw->cda = virt_to_dma32(DE_data);
/* create LO ccw */
ccw++;
@@ -1722,7 +1701,7 @@ dasd_3990_erp_action_1B_32(struct dasd_ccw_req * default_erp, char *sense)
ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
ccw->flags = CCW_FLAG_CC;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(LO_data);
+ ccw->cda = virt_to_dma32(LO_data);
/* TIC to the failed ccw */
ccw++;
@@ -1768,7 +1747,7 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
{
struct dasd_device *device = previous_erp->startdev;
- __u32 cpa = 0;
+ dma32_t cpa = 0;
struct dasd_ccw_req *cqr;
struct dasd_ccw_req *erp;
char *LO_data; /* struct LO_eckd_data */
@@ -1807,10 +1786,8 @@ dasd_3990_update_1B(struct dasd_ccw_req * previous_erp, char *sense)
cpa = previous_erp->irb.scsw.cmd.cpa;
if (cpa == 0) {
- /* internal error 02 -
- Unable to determine address of the CCW to be restarted */
- dev_err(&device->cdev->dev, "An error occurred in the DASD "
- "device driver, reason=%s\n", "02");
+ dev_err(&device->cdev->dev,
+ "Unable to determine address of to be restarted CCW\n");
previous_erp->status = DASD_CQR_FAILED;
@@ -2009,15 +1986,9 @@ dasd_3990_erp_compound_config(struct dasd_ccw_req * erp, char *sense)
{
if ((sense[25] & DASD_SENSE_BIT_1) && (sense[26] & DASD_SENSE_BIT_2)) {
-
- /* set to suspended duplex state then restart
- internal error 05 - Set device to suspended duplex state
- should be done */
struct dasd_device *device = erp->startdev;
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", "05");
-
+ "Compound configuration error occurred\n");
}
erp->function = dasd_3990_erp_compound_config;
@@ -2153,10 +2124,9 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
erp = dasd_3990_erp_int_req(erp);
break;
- case 0x0F: /* length mismatch during update write command
- internal error 08 - update write command error*/
- dev_err(&device->cdev->dev, "An error occurred in the "
- "DASD device driver, reason=%s\n", "08");
+ case 0x0F:
+ dev_err(&device->cdev->dev,
+ "Update write command error occurred\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
break;
@@ -2165,12 +2135,9 @@ dasd_3990_erp_inspect_32(struct dasd_ccw_req * erp, char *sense)
erp = dasd_3990_erp_action_10_32(erp, sense);
break;
- case 0x15: /* next track outside defined extend
- internal error 07 - The next track is not
- within the defined storage extent */
+ case 0x15:
dev_err(&device->cdev->dev,
- "An error occurred in the DASD device driver, "
- "reason=%s\n", "07");
+ "Track outside defined extent error occurred\n");
erp = dasd_3990_erp_cleanup(erp, DASD_CQR_FAILED);
break;
@@ -2419,7 +2386,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
tcw = erp->cpaddr;
tsb = (struct tsb *) &tcw[1];
*tcw = *((struct tcw *)cqr->cpaddr);
- tcw->tsb = virt_to_phys(tsb);
+ tcw->tsb = virt_to_dma64(tsb);
} else if (ccw->cmd_code == DASD_ECKD_CCW_PSF) {
/* PSF cannot be chained from NOOP/TIC */
erp->cpaddr = cqr->cpaddr;
@@ -2430,7 +2397,7 @@ static struct dasd_ccw_req *dasd_3990_erp_add_erp(struct dasd_ccw_req *cqr)
ccw->flags = CCW_FLAG_CC;
ccw++;
ccw->cmd_code = CCW_CMD_TIC;
- ccw->cda = (__u32)virt_to_phys(cqr->cpaddr);
+ ccw->cda = virt_to_dma32(cqr->cpaddr);
}
erp->flags = cqr->flags;
@@ -2663,7 +2630,7 @@ dasd_3990_erp_further_erp(struct dasd_ccw_req *erp)
* necessary
*/
dev_err(&device->cdev->dev,
- "ERP %p has run out of retries and failed\n", erp);
+ "ERP %px has run out of retries and failed\n", erp);
erp->status = DASD_CQR_FAILED;
}
@@ -2704,8 +2671,7 @@ dasd_3990_erp_handle_match_erp(struct dasd_ccw_req *erp_head,
while (erp_done != erp) {
if (erp_done == NULL) /* end of chain reached */
- panic(PRINTK_HEADER "Programming error in ERP! The "
- "original request was lost\n");
+ panic("Programming error in ERP! The original request was lost\n");
/* remove the request from the device queue */
list_del(&erp_done->blocklist);
@@ -2786,11 +2752,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
"ERP chain at BEGINNING of ERP-ACTION\n");
for (temp_erp = cqr;
temp_erp != NULL; temp_erp = temp_erp->refers) {
-
dev_err(&device->cdev->dev,
- "ERP %p (%02x) refers to %p\n",
- temp_erp, temp_erp->status,
- temp_erp->refers);
+ "ERP %px (%02x) refers to %px\n",
+ temp_erp, temp_erp->status, temp_erp->refers);
}
}
@@ -2837,11 +2801,9 @@ dasd_3990_erp_action(struct dasd_ccw_req * cqr)
"ERP chain at END of ERP-ACTION\n");
for (temp_erp = erp;
temp_erp != NULL; temp_erp = temp_erp->refers) {
-
dev_err(&device->cdev->dev,
- "ERP %p (%02x) refers to %p\n",
- temp_erp, temp_erp->status,
- temp_erp->refers);
+ "ERP %px (%02x) refers to %px\n",
+ temp_erp, temp_erp->status, temp_erp->refers);
}
}
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index c9740ae88d..f7e768d8ca 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -6,20 +6,12 @@
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
-#define KMSG_COMPONENT "dasd-eckd"
-
#include <linux/list.h>
#include <linux/slab.h>
#include <asm/ebcdic.h>
#include "dasd_int.h"
#include "dasd_eckd.h"
-#ifdef PRINTK_HEADER
-#undef PRINTK_HEADER
-#endif /* PRINTK_HEADER */
-#define PRINTK_HEADER "dasd(eckd):"
-
-
/*
* General concept of alias management:
* - PAV and DASD alias management is specific to the eckd discipline.
@@ -443,7 +435,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - feature codes */
memset(lcu->uac, 0, sizeof(*(lcu->uac)));
@@ -451,7 +443,7 @@ static int read_unit_address_configuration(struct dasd_device *device,
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*(lcu->uac));
- ccw->cda = (__u32)virt_to_phys(lcu->uac);
+ ccw->cda = virt_to_dma32(lcu->uac);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -747,7 +739,7 @@ static int reset_summary_unit_check(struct alias_lcu *lcu,
ccw->cmd_code = DASD_ECKD_CCW_RSCK;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
((char *)cqr->data)[0] = reason;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index c4e36650c4..0316c20823 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -13,8 +13,6 @@
*
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/module.h>
@@ -24,8 +22,6 @@
#include <linux/uaccess.h>
#include <asm/ipl.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd_devmap:"
#define DASD_MAX_PARAMS 256
#include "dasd_int.h"
@@ -1114,7 +1110,7 @@ dasd_use_diag_show(struct device *dev, struct device_attribute *attr, char *buf)
use_diag = (devmap->features & DASD_FEATURE_USEDIAG) != 0;
else
use_diag = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USEDIAG) != 0;
- return sprintf(buf, use_diag ? "1\n" : "0\n");
+ return sysfs_emit(buf, use_diag ? "1\n" : "0\n");
}
static ssize_t
@@ -1163,7 +1159,7 @@ dasd_use_raw_show(struct device *dev, struct device_attribute *attr, char *buf)
use_raw = (devmap->features & DASD_FEATURE_USERAW) != 0;
else
use_raw = (DASD_FEATURE_DEFAULT & DASD_FEATURE_USERAW) != 0;
- return sprintf(buf, use_raw ? "1\n" : "0\n");
+ return sysfs_emit(buf, use_raw ? "1\n" : "0\n");
}
static ssize_t
@@ -1259,7 +1255,7 @@ dasd_access_show(struct device *dev, struct device_attribute *attr,
if (count < 0)
return count;
- return sprintf(buf, "%d\n", count);
+ return sysfs_emit(buf, "%d\n", count);
}
static DEVICE_ATTR(host_access_count, 0444, dasd_access_show, NULL);
@@ -1338,19 +1334,19 @@ static ssize_t dasd_alias_show(struct device *dev,
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
- return sprintf(buf, "0\n");
+ return sysfs_emit(buf, "0\n");
if (device->discipline && device->discipline->get_uid &&
!device->discipline->get_uid(device, &uid)) {
if (uid.type == UA_BASE_PAV_ALIAS ||
uid.type == UA_HYPER_PAV_ALIAS) {
dasd_put_device(device);
- return sprintf(buf, "1\n");
+ return sysfs_emit(buf, "1\n");
}
}
dasd_put_device(device);
- return sprintf(buf, "0\n");
+ return sysfs_emit(buf, "0\n");
}
static DEVICE_ATTR(alias, 0444, dasd_alias_show, NULL);
@@ -1412,15 +1408,9 @@ dasd_uid_show(struct device *dev, struct device_attribute *attr, char *buf)
break;
}
- if (strlen(uid.vduit) > 0)
- snprintf(uid_string, sizeof(uid_string),
- "%s.%s.%04x.%s.%s",
- uid.vendor, uid.serial, uid.ssid, ua_string,
- uid.vduit);
- else
- snprintf(uid_string, sizeof(uid_string),
- "%s.%s.%04x.%s",
- uid.vendor, uid.serial, uid.ssid, ua_string);
+ snprintf(uid_string, sizeof(uid_string), "%s.%s.%04x.%s%s%s",
+ uid.vendor, uid.serial, uid.ssid, ua_string,
+ uid.vduit[0] ? "." : "", uid.vduit);
}
dasd_put_device(device);
@@ -1862,7 +1852,7 @@ static ssize_t dasd_pm_show(struct device *dev,
device = dasd_device_from_cdev(to_ccwdev(dev));
if (IS_ERR(device))
- return sprintf(buf, "0\n");
+ return sysfs_emit(buf, "0\n");
opm = dasd_path_get_opm(device);
nppm = dasd_path_get_nppm(device);
@@ -1872,8 +1862,8 @@ static ssize_t dasd_pm_show(struct device *dev,
ifccpm = dasd_path_get_ifccpm(device);
dasd_put_device(device);
- return sprintf(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm,
- cablepm, cuirpm, hpfpm, ifccpm);
+ return sysfs_emit(buf, "%02x %02x %02x %02x %02x %02x\n", opm, nppm,
+ cablepm, cuirpm, hpfpm, ifccpm);
}
static DEVICE_ATTR(path_masks, 0444, dasd_pm_show, NULL);
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 2e4e555b37..ea4b1d01bb 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -8,8 +8,6 @@
*
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/kernel_stat.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
@@ -31,8 +29,6 @@
#include "dasd_int.h"
#include "dasd_diag.h"
-#define PRINTK_HEADER "dasd(diag):"
-
MODULE_LICENSE("GPL");
/* The maximum number of blocks per request (max_blocks) is dependent on the
@@ -621,25 +617,9 @@ dasd_diag_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
"dump sense not available for DIAG data");
}
-/*
- * Initialize block layer request queue.
- */
-static void dasd_diag_setup_blk_queue(struct dasd_block *block)
+static unsigned int dasd_diag_max_sectors(struct dasd_block *block)
{
- unsigned int logical_block_size = block->bp_block;
- struct request_queue *q = block->gdp->queue;
- int max;
-
- max = DIAG_MAX_BLOCKS << block->s2b_shift;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- q->limits.max_dev_sectors = max;
- blk_queue_logical_block_size(q, logical_block_size);
- blk_queue_max_hw_sectors(q, max);
- blk_queue_max_segments(q, USHRT_MAX);
- /* With page sized segments each segment can be translated into one idaw/tidaw */
- blk_queue_max_segment_size(q, PAGE_SIZE);
- blk_queue_segment_boundary(q, PAGE_SIZE - 1);
- blk_queue_dma_alignment(q, PAGE_SIZE - 1);
+ return DIAG_MAX_BLOCKS << block->s2b_shift;
}
static int dasd_diag_pe_handler(struct dasd_device *device,
@@ -652,10 +632,10 @@ static struct dasd_discipline dasd_diag_discipline = {
.owner = THIS_MODULE,
.name = "DIAG",
.ebcname = "DIAG",
+ .max_sectors = dasd_diag_max_sectors,
.check_device = dasd_diag_check_device,
.pe_handler = dasd_diag_pe_handler,
.fill_geometry = dasd_diag_fill_geometry,
- .setup_blk_queue = dasd_diag_setup_blk_queue,
.start_IO = dasd_start_diag,
.term_IO = dasd_diag_term_IO,
.handle_terminated_request = dasd_diag_handle_terminated_request,
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bd89b03296..180a008d38 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -10,8 +10,6 @@
* Author.........: Nigel Hislop <hislop_nigel@emc.com>
*/
-#define KMSG_COMPONENT "dasd-eckd"
-
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
@@ -37,11 +35,6 @@
#include "dasd_int.h"
#include "dasd_eckd.h"
-#ifdef PRINTK_HEADER
-#undef PRINTK_HEADER
-#endif /* PRINTK_HEADER */
-#define PRINTK_HEADER "dasd(eckd):"
-
/*
* raw track access always map to 64k in memory
* so it maps to 16 blocks of 4k per track
@@ -290,7 +283,7 @@ define_extent(struct ccw1 *ccw, struct DE_eckd_data *data, unsigned int trk,
ccw->cmd_code = DASD_ECKD_CCW_DEFINE_EXTENT;
ccw->flags = 0;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
}
memset(data, 0, sizeof(struct DE_eckd_data));
@@ -400,7 +393,7 @@ static void locate_record_ext(struct ccw1 *ccw, struct LRE_eckd_data *data,
ccw->count = 22;
else
ccw->count = 20;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
}
memset(data, 0, sizeof(*data));
@@ -546,11 +539,11 @@ static int prefix_LRE(struct ccw1 *ccw, struct PFX_eckd_data *pfxdata,
ccw->flags = 0;
if (cmd == DASD_ECKD_CCW_WRITE_FULL_TRACK) {
ccw->count = sizeof(*pfxdata) + 2;
- ccw->cda = (__u32)virt_to_phys(pfxdata);
+ ccw->cda = virt_to_dma32(pfxdata);
memset(pfxdata, 0, sizeof(*pfxdata) + 2);
} else {
ccw->count = sizeof(*pfxdata);
- ccw->cda = (__u32)virt_to_phys(pfxdata);
+ ccw->cda = virt_to_dma32(pfxdata);
memset(pfxdata, 0, sizeof(*pfxdata));
}
@@ -617,7 +610,7 @@ locate_record(struct ccw1 *ccw, struct LO_eckd_data *data, unsigned int trk,
ccw->cmd_code = DASD_ECKD_CCW_LOCATE_RECORD;
ccw->flags = 0;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
memset(data, 0, sizeof(struct LO_eckd_data));
sector = 0;
@@ -832,7 +825,7 @@ static void dasd_eckd_fill_rcd_cqr(struct dasd_device *device,
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RCD;
ccw->flags = 0;
- ccw->cda = (__u32)virt_to_phys(rcd_buffer);
+ ccw->cda = virt_to_dma32(rcd_buffer);
ccw->count = DASD_ECKD_RCD_DATA_SIZE;
cqr->magic = DASD_ECKD_MAGIC;
@@ -860,7 +853,7 @@ static void read_conf_cb(struct dasd_ccw_req *cqr, void *data)
if (cqr->status != DASD_CQR_DONE) {
ccw = cqr->cpaddr;
- rcd_buffer = phys_to_virt(ccw->cda);
+ rcd_buffer = dma32_to_virt(ccw->cda);
memset(rcd_buffer, 0, sizeof(*rcd_buffer));
rcd_buffer[0] = 0xE5;
@@ -1072,22 +1065,14 @@ static void dasd_eckd_read_fc_security(struct dasd_device *device)
}
}
-static void dasd_eckd_get_uid_string(struct dasd_conf *conf,
- char *print_uid)
+static void dasd_eckd_get_uid_string(struct dasd_conf *conf, char *print_uid)
{
struct dasd_uid uid;
create_uid(conf, &uid);
- if (strlen(uid.vduit) > 0)
- snprintf(print_uid, DASD_UID_STRLEN,
- "%s.%s.%04x.%02x.%s",
- uid.vendor, uid.serial, uid.ssid,
- uid.real_unit_addr, uid.vduit);
- else
- snprintf(print_uid, DASD_UID_STRLEN,
- "%s.%s.%04x.%02x",
- uid.vendor, uid.serial, uid.ssid,
- uid.real_unit_addr);
+ snprintf(print_uid, DASD_UID_STRLEN, "%s.%s.%04x.%02x%s%s",
+ uid.vendor, uid.serial, uid.ssid, uid.real_unit_addr,
+ uid.vduit[0] ? "." : "", uid.vduit);
}
static int dasd_eckd_check_cabling(struct dasd_device *device,
@@ -1549,7 +1534,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - feature codes */
features = (struct dasd_rssd_features *) (prssdp + 1);
@@ -1558,7 +1543,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_features);
- ccw->cda = (__u32)virt_to_phys(features);
+ ccw->cda = virt_to_dma32(features);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -1618,7 +1603,7 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(*prssdp);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - Volume Storage Query */
vsq = (struct dasd_rssd_vsq *)(prssdp + 1);
@@ -1628,7 +1613,7 @@ static int dasd_eckd_read_vol_info(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*vsq);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(vsq);
+ ccw->cda = virt_to_dma32(vsq);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -1803,7 +1788,7 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(*prssdp);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
lcq = (struct dasd_rssd_lcq *)(prssdp + 1);
memset(lcq, 0, sizeof(*lcq));
@@ -1812,7 +1797,7 @@ static int dasd_eckd_read_ext_pool_info(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*lcq);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(lcq);
+ ccw->cda = virt_to_dma32(lcq);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -1909,7 +1894,7 @@ static struct dasd_ccw_req *dasd_eckd_build_psf_ssc(struct dasd_device *device,
}
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
- ccw->cda = (__u32)virt_to_phys(psf_ssc_data);
+ ccw->cda = virt_to_dma32(psf_ssc_data);
ccw->count = 66;
cqr->startdev = device;
@@ -2265,7 +2250,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = 0;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(count_data);
+ ccw->cda = virt_to_dma32(count_data);
ccw++;
count_data++;
}
@@ -2279,7 +2264,7 @@ dasd_eckd_analysis_ccw(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = 0;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(count_data);
+ ccw->cda = virt_to_dma32(count_data);
cqr->block = NULL;
cqr->startdev = device;
@@ -2650,7 +2635,7 @@ dasd_eckd_build_check(struct dasd_device *base, struct format_data_t *fdata,
ccw->cmd_code = DASD_ECKD_CCW_READ_COUNT;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(fmt_buffer);
+ ccw->cda = virt_to_dma32(fmt_buffer);
ccw++;
fmt_buffer++;
}
@@ -2860,7 +2845,7 @@ dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(ect);
+ ccw->cda = virt_to_dma32(ect);
ccw++;
}
if ((intensity & ~0x08) & 0x04) { /* erase track */
@@ -2875,7 +2860,7 @@ dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(ect);
+ ccw->cda = virt_to_dma32(ect);
} else { /* write remaining records */
for (i = 0; i < rpt; i++) {
ect = (struct eckd_count *) data;
@@ -2910,7 +2895,7 @@ dasd_eckd_build_format(struct dasd_device *base, struct dasd_device *startdev,
DASD_ECKD_CCW_WRITE_CKD_MT;
ccw->flags = CCW_FLAG_SLI;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(ect);
+ ccw->cda = virt_to_dma32(ect);
ccw++;
}
}
@@ -3851,7 +3836,7 @@ dasd_eckd_dso_ras(struct dasd_device *device, struct dasd_block *block,
}
ccw = cqr->cpaddr;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
ccw->cmd_code = DASD_ECKD_CCW_DSO;
ccw->count = size;
@@ -3976,7 +3961,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
unsigned int blksize)
{
struct dasd_eckd_private *private;
- unsigned long *idaws;
+ dma64_t *idaws;
struct LO_eckd_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
@@ -4054,8 +4039,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-EAGAIN);
}
- idaws = (unsigned long *) (cqr->data +
- sizeof(struct PFX_eckd_data));
+ idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data));
} else {
if (define_extent(ccw++, cqr->data, first_trk,
last_trk, cmd, basedev, 0) == -EAGAIN) {
@@ -4065,8 +4049,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-EAGAIN);
}
- idaws = (unsigned long *) (cqr->data +
- sizeof(struct DE_eckd_data));
+ idaws = (dma64_t *)(cqr->data + sizeof(struct DE_eckd_data));
}
/* Build locate_record+read/write/ccws. */
LO_data = (struct LO_eckd_data *) (idaws + cidaw);
@@ -4120,11 +4103,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
ccw->cmd_code = rcmd;
ccw->count = count;
if (idal_is_needed(dst, blksize)) {
- ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags = CCW_FLAG_IDA;
idaws = idal_create_words(idaws, dst, blksize);
} else {
- ccw->cda = (__u32)virt_to_phys(dst);
+ ccw->cda = virt_to_dma32(dst);
ccw->flags = 0;
}
ccw++;
@@ -4167,7 +4150,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
unsigned int blk_per_trk,
unsigned int blksize)
{
- unsigned long *idaws;
+ dma64_t *idaws;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
struct req_iterator iter;
@@ -4237,7 +4220,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
* (or 2K blocks on 31-bit)
* - the scope of a ccw and it's idal ends with the track boundaries
*/
- idaws = (unsigned long *) (cqr->data + sizeof(struct PFX_eckd_data));
+ idaws = (dma64_t *)(cqr->data + sizeof(struct PFX_eckd_data));
recid = first_rec;
new_track = 1;
end_idaw = 0;
@@ -4258,7 +4241,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
ccw[-1].flags |= CCW_FLAG_CC;
ccw->cmd_code = cmd;
ccw->count = len_to_track_end;
- ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags = CCW_FLAG_IDA;
ccw++;
recid += count;
@@ -4274,7 +4257,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
* idaw ends
*/
if (!idaw_dst) {
- if ((__u32)virt_to_phys(dst) & (IDA_BLOCK_SIZE - 1)) {
+ if ((unsigned long)(dst) & (IDA_BLOCK_SIZE - 1)) {
dasd_sfree_request(cqr, startdev);
return ERR_PTR(-ERANGE);
} else
@@ -4294,7 +4277,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
* idal_create_words will handle cases where idaw_len
* is larger then IDA_BLOCK_SIZE
*/
- if (!((__u32)virt_to_phys(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1)))
+ if (!((unsigned long)(idaw_dst + idaw_len) & (IDA_BLOCK_SIZE - 1)))
end_idaw = 1;
/* We also need to end the idaw at track end */
if (!len_to_track_end) {
@@ -4753,11 +4736,11 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
struct req_iterator iter;
struct dasd_ccw_req *cqr;
unsigned int trkcount;
- unsigned long *idaws;
unsigned int size;
unsigned char cmd;
struct bio_vec bv;
struct ccw1 *ccw;
+ dma64_t *idaws;
int use_prefix;
void *data;
char *dst;
@@ -4838,7 +4821,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
trkcount, cmd, basedev, 0, 0);
}
- idaws = (unsigned long *)(cqr->data + size);
+ idaws = (dma64_t *)(cqr->data + size);
len_to_track_end = 0;
if (start_padding_sectors) {
ccw[-1].flags |= CCW_FLAG_CC;
@@ -4847,7 +4830,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
ccw->count = 57326;
/* 64k map to one track */
len_to_track_end = 65536 - start_padding_sectors * 512;
- ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags |= CCW_FLAG_IDA;
ccw->flags |= CCW_FLAG_SLI;
ccw++;
@@ -4866,7 +4849,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_raw(struct dasd_device *startdev,
ccw->count = 57326;
/* 64k map to one track */
len_to_track_end = 65536;
- ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags |= CCW_FLAG_IDA;
ccw->flags |= CCW_FLAG_SLI;
ccw++;
@@ -4923,9 +4906,9 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
ccw++;
if (dst) {
if (ccw->flags & CCW_FLAG_IDA)
- cda = *((char **)phys_to_virt(ccw->cda));
+ cda = *((char **)dma32_to_virt(ccw->cda));
else
- cda = phys_to_virt(ccw->cda);
+ cda = dma32_to_virt(ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
memcpy(dst, cda, bv.bv_len);
@@ -5075,7 +5058,7 @@ dasd_eckd_release(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_RELEASE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -5130,7 +5113,7 @@ dasd_eckd_reserve(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_RESERVE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -5184,7 +5167,7 @@ dasd_eckd_steal_lock(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_SLCK;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 32;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -5245,7 +5228,7 @@ static int dasd_eckd_snid(struct dasd_device *device,
ccw->cmd_code = DASD_ECKD_CCW_SNID;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = 12;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
cqr->startdev = device;
cqr->memdev = device;
clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
@@ -5312,7 +5295,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - Performance Statistics */
stats = (struct dasd_rssd_perf_stats_t *) (prssdp + 1);
@@ -5321,7 +5304,7 @@ dasd_eckd_performance(struct dasd_device *device, void __user *argp)
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_perf_stats_t);
- ccw->cda = (__u32)virt_to_phys(stats);
+ ccw->cda = virt_to_dma32(stats);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -5465,7 +5448,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
ccw->cmd_code = DASD_ECKD_CCW_PSF;
ccw->count = usrparm.psf_data_len;
ccw->flags |= CCW_FLAG_CC;
- ccw->cda = (__u32)virt_to_phys(psf_data);
+ ccw->cda = virt_to_dma32(psf_data);
ccw++;
@@ -5473,7 +5456,7 @@ static int dasd_symm_io(struct dasd_device *device, void __user *argp)
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = usrparm.rssd_result_len;
ccw->flags = CCW_FLAG_SLI ;
- ccw->cda = (__u32)virt_to_phys(rssd_result);
+ ccw->cda = virt_to_dma32(rssd_result);
rc = dasd_sleep_on(cqr);
if (rc)
@@ -5529,22 +5512,22 @@ dasd_eckd_ioctl(struct dasd_block *block, unsigned int cmd, void __user *argp)
* and return number of printed chars.
*/
static void
-dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
+dasd_eckd_dump_ccw_range(struct dasd_device *device, struct ccw1 *from,
+ struct ccw1 *to, char *page)
{
int len, count;
char *datap;
len = 0;
while (from <= to) {
- len += sprintf(page + len, PRINTK_HEADER
- " CCW %p: %08X %08X DAT:",
+ len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
from, ((int *) from)[0], ((int *) from)[1]);
/* get pointer to data (consider IDALs) */
if (from->flags & CCW_FLAG_IDA)
- datap = (char *)*((addr_t *)phys_to_virt(from->cda));
+ datap = (char *)*((addr_t *)dma32_to_virt(from->cda));
else
- datap = phys_to_virt(from->cda);
+ datap = dma32_to_virt(from->cda);
/* dump data (max 128 bytes) */
for (count = 0; count < from->count && count < 128; count++) {
@@ -5560,7 +5543,7 @@ dasd_eckd_dump_ccw_range(struct ccw1 *from, struct ccw1 *to, char *page)
from++;
}
if (len > 0)
- printk(KERN_ERR "%s", page);
+ dev_err(&device->cdev->dev, "%s", page);
}
static void
@@ -5591,9 +5574,12 @@ dasd_eckd_dump_sense_dbf(struct dasd_device *device, struct irb *irb,
static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
struct dasd_ccw_req *req, struct irb *irb)
{
- char *page;
struct ccw1 *first, *last, *fail, *from, *to;
+ struct device *dev;
int len, sl, sct;
+ char *page;
+
+ dev = &device->cdev->dev;
page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) {
@@ -5602,24 +5588,18 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
return;
}
/* dump the sense data */
- len = sprintf(page, PRINTK_HEADER
- " I/O status report for device %s:\n",
- dev_name(&device->cdev->dev));
- len += sprintf(page + len, PRINTK_HEADER
- " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
- "CS:%02X RC:%d\n",
+ len = sprintf(page, "I/O status report:\n");
+ len += sprintf(page + len,
+ "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X CS:%02X RC:%d\n",
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
scsw_dstat(&irb->scsw), scsw_cstat(&irb->scsw),
req ? req->intrc : 0);
- len += sprintf(page + len, PRINTK_HEADER
- " device %s: Failing CCW: %p\n",
- dev_name(&device->cdev->dev),
- phys_to_virt(irb->scsw.cmd.cpa));
+ len += sprintf(page + len, "Failing CCW: %px\n",
+ dma32_to_virt(irb->scsw.cmd.cpa));
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
- len += sprintf(page + len, PRINTK_HEADER
- " Sense(hex) %2d-%2d:",
+ len += sprintf(page + len, "Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
@@ -5631,23 +5611,20 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
if (irb->ecw[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
- sprintf(page + len, PRINTK_HEADER
- " 24 Byte: %x MSG %x, "
- "%s MSGb to SYSOP\n",
+ sprintf(page + len,
+ "24 Byte: %x MSG %x, %s MSGb to SYSOP\n",
irb->ecw[7] >> 4, irb->ecw[7] & 0x0f,
irb->ecw[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
- sprintf(page + len, PRINTK_HEADER
- " 32 Byte: Format: %x "
- "Exception class %x\n",
+ sprintf(page + len,
+ "32 Byte: Format: %x Exception class %x\n",
irb->ecw[6] & 0x0f, irb->ecw[22] >> 4);
}
} else {
- sprintf(page + len, PRINTK_HEADER
- " SORRY - NO VALID SENSE AVAILABLE\n");
+ sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
}
- printk(KERN_ERR "%s", page);
+ dev_err(dev, "%s", page);
if (req) {
/* req == NULL for unsolicited interrupts */
@@ -5656,28 +5633,28 @@ static void dasd_eckd_dump_sense_ccw(struct dasd_device *device,
first = req->cpaddr;
for (last = first; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
to = min(first + 6, last);
- printk(KERN_ERR PRINTK_HEADER " Related CP in req: %p\n", req);
- dasd_eckd_dump_ccw_range(first, to, page);
+ dev_err(dev, "Related CP in req: %px\n", req);
+ dasd_eckd_dump_ccw_range(device, first, to, page);
/* print failing CCW area (maximum 4) */
/* scsw->cda is either valid or zero */
from = ++to;
- fail = phys_to_virt(irb->scsw.cmd.cpa); /* failing CCW */
+ fail = dma32_to_virt(irb->scsw.cmd.cpa); /* failing CCW */
if (from < fail - 2) {
from = fail - 2; /* there is a gap - print header */
- printk(KERN_ERR PRINTK_HEADER "......\n");
+ dev_err(dev, "......\n");
}
to = min(fail + 1, last);
- dasd_eckd_dump_ccw_range(from, to, page + len);
+ dasd_eckd_dump_ccw_range(device, from, to, page + len);
/* print last CCWs (maximum 2) */
len = 0;
from = max(from, ++to);
if (from < last - 1) {
from = last - 1; /* there is a gap - print header */
- printk(KERN_ERR PRINTK_HEADER "......\n");
+ dev_err(dev, "......\n");
}
- dasd_eckd_dump_ccw_range(from, last, page + len);
+ dasd_eckd_dump_ccw_range(device, from, last, page + len);
}
free_page((unsigned long) page);
}
@@ -5701,11 +5678,9 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
return;
}
/* dump the sense data */
- len = sprintf(page, PRINTK_HEADER
- " I/O status report for device %s:\n",
- dev_name(&device->cdev->dev));
- len += sprintf(page + len, PRINTK_HEADER
- " in req: %p CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
+ len = sprintf(page, "I/O status report:\n");
+ len += sprintf(page + len,
+ "in req: %px CC:%02X FC:%02X AC:%02X SC:%02X DS:%02X "
"CS:%02X fcxs:%02X schxs:%02X RC:%d\n",
req, scsw_cc(&irb->scsw), scsw_fctl(&irb->scsw),
scsw_actl(&irb->scsw), scsw_stctl(&irb->scsw),
@@ -5713,58 +5688,46 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
irb->scsw.tm.fcxs,
(irb->scsw.tm.ifob << 7) | irb->scsw.tm.sesq,
req ? req->intrc : 0);
- len += sprintf(page + len, PRINTK_HEADER
- " device %s: Failing TCW: %p\n",
- dev_name(&device->cdev->dev),
- phys_to_virt(irb->scsw.tm.tcw));
+ len += sprintf(page + len, "Failing TCW: %px\n",
+ dma32_to_virt(irb->scsw.tm.tcw));
tsb = NULL;
sense = NULL;
if (irb->scsw.tm.tcw && (irb->scsw.tm.fcxs & 0x01))
- tsb = tcw_get_tsb(phys_to_virt(irb->scsw.tm.tcw));
+ tsb = tcw_get_tsb(dma32_to_virt(irb->scsw.tm.tcw));
if (tsb) {
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->length %d\n", tsb->length);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->flags %x\n", tsb->flags);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->dcw_offset %d\n", tsb->dcw_offset);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->count %d\n", tsb->count);
+ len += sprintf(page + len, "tsb->length %d\n", tsb->length);
+ len += sprintf(page + len, "tsb->flags %x\n", tsb->flags);
+ len += sprintf(page + len, "tsb->dcw_offset %d\n", tsb->dcw_offset);
+ len += sprintf(page + len, "tsb->count %d\n", tsb->count);
residual = tsb->count - 28;
- len += sprintf(page + len, PRINTK_HEADER
- " residual %d\n", residual);
+ len += sprintf(page + len, "residual %d\n", residual);
switch (tsb->flags & 0x07) {
case 1: /* tsa_iostat */
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.iostat.dev_time %d\n",
+ len += sprintf(page + len, "tsb->tsa.iostat.dev_time %d\n",
tsb->tsa.iostat.dev_time);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.iostat.def_time %d\n",
+ len += sprintf(page + len, "tsb->tsa.iostat.def_time %d\n",
tsb->tsa.iostat.def_time);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.iostat.queue_time %d\n",
+ len += sprintf(page + len, "tsb->tsa.iostat.queue_time %d\n",
tsb->tsa.iostat.queue_time);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.iostat.dev_busy_time %d\n",
+ len += sprintf(page + len, "tsb->tsa.iostat.dev_busy_time %d\n",
tsb->tsa.iostat.dev_busy_time);
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.iostat.dev_act_time %d\n",
+ len += sprintf(page + len, "tsb->tsa.iostat.dev_act_time %d\n",
tsb->tsa.iostat.dev_act_time);
sense = tsb->tsa.iostat.sense;
break;
case 2: /* ts_ddpc */
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.ddpc.rc %d\n", tsb->tsa.ddpc.rc);
+ len += sprintf(page + len, "tsb->tsa.ddpc.rc %d\n",
+ tsb->tsa.ddpc.rc);
for (sl = 0; sl < 2; sl++) {
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.ddpc.rcq %2d-%2d: ",
+ len += sprintf(page + len,
+ "tsb->tsa.ddpc.rcq %2d-%2d: ",
(8 * sl), ((8 * sl) + 7));
rcq = tsb->tsa.ddpc.rcq;
for (sct = 0; sct < 8; sct++) {
- len += sprintf(page + len, " %02x",
+ len += sprintf(page + len, "%02x",
rcq[8 * sl + sct]);
}
len += sprintf(page + len, "\n");
@@ -5772,15 +5735,15 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
sense = tsb->tsa.ddpc.sense;
break;
case 3: /* tsa_intrg */
- len += sprintf(page + len, PRINTK_HEADER
- " tsb->tsa.intrg.: not supported yet\n");
+ len += sprintf(page + len,
+ "tsb->tsa.intrg.: not supported yet\n");
break;
}
if (sense) {
for (sl = 0; sl < 4; sl++) {
- len += sprintf(page + len, PRINTK_HEADER
- " Sense(hex) %2d-%2d:",
+ len += sprintf(page + len,
+ "Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
len += sprintf(page + len, " %02x",
@@ -5791,27 +5754,23 @@ static void dasd_eckd_dump_sense_tcw(struct dasd_device *device,
if (sense[27] & DASD_SENSE_BIT_0) {
/* 24 Byte Sense Data */
- sprintf(page + len, PRINTK_HEADER
- " 24 Byte: %x MSG %x, "
- "%s MSGb to SYSOP\n",
+ sprintf(page + len,
+ "24 Byte: %x MSG %x, %s MSGb to SYSOP\n",
sense[7] >> 4, sense[7] & 0x0f,
sense[1] & 0x10 ? "" : "no");
} else {
/* 32 Byte Sense Data */
- sprintf(page + len, PRINTK_HEADER
- " 32 Byte: Format: %x "
- "Exception class %x\n",
+ sprintf(page + len,
+ "32 Byte: Format: %x Exception class %x\n",
sense[6] & 0x0f, sense[22] >> 4);
}
} else {
- sprintf(page + len, PRINTK_HEADER
- " SORRY - NO VALID SENSE AVAILABLE\n");
+ sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
}
} else {
- sprintf(page + len, PRINTK_HEADER
- " SORRY - NO TSB DATA AVAILABLE\n");
+ sprintf(page + len, "SORRY - NO TSB DATA AVAILABLE\n");
}
- printk(KERN_ERR "%s", page);
+ dev_err(&device->cdev->dev, "%s", page);
free_page((unsigned long) page);
}
@@ -5945,7 +5904,7 @@ retry:
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - message buffer */
message_buf = (struct dasd_rssd_messages *) (prssdp + 1);
@@ -5955,7 +5914,7 @@ retry:
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_rssd_messages);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(message_buf);
+ ccw->cda = virt_to_dma32(message_buf);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -6036,14 +5995,14 @@ static int dasd_eckd_query_host_access(struct dasd_device *device,
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(prssdp);
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - query host access */
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(struct dasd_psf_query_host_access);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)virt_to_phys(host_access);
+ ccw->cda = virt_to_dma32(host_access);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -6278,14 +6237,14 @@ static int dasd_eckd_query_pprc_status(struct dasd_device *device,
ccw->count = sizeof(struct dasd_psf_prssd_data);
ccw->flags |= CCW_FLAG_CC;
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)(addr_t)prssdp;
+ ccw->cda = virt_to_dma32(prssdp);
/* Read Subsystem Data - query host access */
ccw++;
ccw->cmd_code = DASD_ECKD_CCW_RSSD;
ccw->count = sizeof(*pprc_data);
ccw->flags |= CCW_FLAG_SLI;
- ccw->cda = (__u32)(addr_t)pprc_data;
+ ccw->cda = virt_to_dma32(pprc_data);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
@@ -6379,7 +6338,7 @@ dasd_eckd_psf_cuir_response(struct dasd_device *device, int response,
psf_cuir->ssid = device->path[pos].ssid;
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_PSF;
- ccw->cda = (__u32)virt_to_phys(psf_cuir);
+ ccw->cda = virt_to_dma32(psf_cuir);
ccw->flags = CCW_FLAG_SLI;
ccw->count = sizeof(struct dasd_psf_cuir_response);
@@ -6865,17 +6824,9 @@ static void dasd_eckd_handle_hpf_error(struct dasd_device *device,
dasd_schedule_requeue(device);
}
-/*
- * Initialize block layer request queue.
- */
-static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
+static unsigned int dasd_eckd_max_sectors(struct dasd_block *block)
{
- unsigned int logical_block_size = block->bp_block;
- struct request_queue *q = block->gdp->queue;
- struct dasd_device *device = block->base;
- int max;
-
- if (device->features & DASD_FEATURE_USERAW) {
+ if (block->base->features & DASD_FEATURE_USERAW) {
/*
* the max_blocks value for raw_track access is 256
* it is higher than the native ECKD value because we
@@ -6883,19 +6834,10 @@ static void dasd_eckd_setup_blk_queue(struct dasd_block *block)
* so the max_hw_sectors are
* 2048 x 512B = 1024kB = 16 tracks
*/
- max = DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
- } else {
- max = DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
+ return DASD_ECKD_MAX_BLOCKS_RAW << block->s2b_shift;
}
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- q->limits.max_dev_sectors = max;
- blk_queue_logical_block_size(q, logical_block_size);
- blk_queue_max_hw_sectors(q, max);
- blk_queue_max_segments(q, USHRT_MAX);
- /* With page sized segments each segment can be translated into one idaw/tidaw */
- blk_queue_max_segment_size(q, PAGE_SIZE);
- blk_queue_segment_boundary(q, PAGE_SIZE - 1);
- blk_queue_dma_alignment(q, PAGE_SIZE - 1);
+
+ return DASD_ECKD_MAX_BLOCKS << block->s2b_shift;
}
static struct ccw_driver dasd_eckd_driver = {
@@ -6927,7 +6869,7 @@ static struct dasd_discipline dasd_eckd_discipline = {
.basic_to_ready = dasd_eckd_basic_to_ready,
.online_to_ready = dasd_eckd_online_to_ready,
.basic_to_known = dasd_eckd_basic_to_known,
- .setup_blk_queue = dasd_eckd_setup_blk_queue,
+ .max_sectors = dasd_eckd_max_sectors,
.fill_geometry = dasd_eckd_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,
diff --git a/drivers/s390/block/dasd_eer.c b/drivers/s390/block/dasd_eer.c
index c956de711c..194e9e2d9c 100644
--- a/drivers/s390/block/dasd_eer.c
+++ b/drivers/s390/block/dasd_eer.c
@@ -7,8 +7,6 @@
* Author(s): Stefan Weinhuber <wein@de.ibm.com>
*/
-#define KMSG_COMPONENT "dasd-eckd"
-
#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
@@ -28,11 +26,6 @@
#include "dasd_int.h"
#include "dasd_eckd.h"
-#ifdef PRINTK_HEADER
-#undef PRINTK_HEADER
-#endif /* PRINTK_HEADER */
-#define PRINTK_HEADER "dasd(eer):"
-
/*
* SECTION: the internal buffer
*/
@@ -492,7 +485,7 @@ int dasd_eer_enable(struct dasd_device *device)
ccw->cmd_code = DASD_ECKD_CCW_SNSS;
ccw->count = SNSS_DATA_SIZE;
ccw->flags = 0;
- ccw->cda = (__u32)virt_to_phys(cqr->data);
+ ccw->cda = virt_to_dma32(cqr->data);
cqr->buildclk = get_tod_clock();
cqr->status = DASD_CQR_FILLED;
diff --git a/drivers/s390/block/dasd_erp.c b/drivers/s390/block/dasd_erp.c
index c07e6e7135..4c0d3a7045 100644
--- a/drivers/s390/block/dasd_erp.c
+++ b/drivers/s390/block/dasd_erp.c
@@ -9,8 +9,6 @@
*
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/ctype.h>
#include <linux/init.h>
@@ -18,9 +16,6 @@
#include <asm/ebcdic.h>
#include <linux/uaccess.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd_erp:"
-
#include "dasd_int.h"
struct dasd_ccw_req *
@@ -170,12 +165,12 @@ dasd_log_sense(struct dasd_ccw_req *cqr, struct irb *irb)
device = cqr->startdev;
if (cqr->intrc == -ETIMEDOUT) {
dev_err(&device->cdev->dev,
- "A timeout error occurred for cqr %p\n", cqr);
+ "A timeout error occurred for cqr %px\n", cqr);
return;
}
if (cqr->intrc == -ENOLINK) {
dev_err(&device->cdev->dev,
- "A transport error occurred for cqr %p\n", cqr);
+ "A transport error occurred for cqr %px\n", cqr);
return;
}
/* dump sense data */
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index c06fa2b271..361e9bd752 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -25,11 +25,6 @@
#include "dasd_int.h"
#include "dasd_fba.h"
-#ifdef PRINTK_HEADER
-#undef PRINTK_HEADER
-#endif /* PRINTK_HEADER */
-#define PRINTK_HEADER "dasd(fba):"
-
#define FBA_DEFAULT_RETRIES 32
#define DASD_FBA_CCW_WRITE 0x41
@@ -83,7 +78,7 @@ define_extent(struct ccw1 * ccw, struct DE_fba_data *data, int rw,
ccw->cmd_code = DASD_FBA_CCW_DEFINE_EXTENT;
ccw->flags = 0;
ccw->count = 16;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
memset(data, 0, sizeof (struct DE_fba_data));
if (rw == WRITE)
(data->mask).perm = 0x0;
@@ -103,7 +98,7 @@ locate_record(struct ccw1 * ccw, struct LO_fba_data *data, int rw,
ccw->cmd_code = DASD_FBA_CCW_LOCATE;
ccw->flags = 0;
ccw->count = 8;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
memset(data, 0, sizeof (struct LO_fba_data));
if (rw == WRITE)
data->operation.cmd = 0x5;
@@ -262,7 +257,7 @@ static void ccw_write_zero(struct ccw1 *ccw, int count)
ccw->cmd_code = DASD_FBA_CCW_WRITE;
ccw->flags |= CCW_FLAG_SLI;
ccw->count = count;
- ccw->cda = (__u32)virt_to_phys(dasd_fba_zero_page);
+ ccw->cda = virt_to_dma32(dasd_fba_zero_page);
}
/*
@@ -432,7 +427,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
struct request *req)
{
struct dasd_fba_private *private = block->base->private;
- unsigned long *idaws;
+ dma64_t *idaws;
struct LO_fba_data *LO_data;
struct dasd_ccw_req *cqr;
struct ccw1 *ccw;
@@ -492,7 +487,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
define_extent(ccw++, cqr->data, rq_data_dir(req),
block->bp_block, blk_rq_pos(req), blk_rq_sectors(req));
/* Build locate_record + read/write ccws. */
- idaws = (unsigned long *) (cqr->data + sizeof(struct DE_fba_data));
+ idaws = (dma64_t *)(cqr->data + sizeof(struct DE_fba_data));
LO_data = (struct LO_fba_data *) (idaws + cidaw);
/* Locate record for all blocks for smart devices. */
if (private->rdc_data.mode.bits.data_chain != 0) {
@@ -528,11 +523,11 @@ static struct dasd_ccw_req *dasd_fba_build_cp_regular(
ccw->cmd_code = cmd;
ccw->count = block->bp_block;
if (idal_is_needed(dst, blksize)) {
- ccw->cda = (__u32)virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags = CCW_FLAG_IDA;
idaws = idal_create_words(idaws, dst, blksize);
} else {
- ccw->cda = (__u32)virt_to_phys(dst);
+ ccw->cda = virt_to_dma32(dst);
ccw->flags = 0;
}
ccw++;
@@ -590,9 +585,9 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
ccw++;
if (dst) {
if (ccw->flags & CCW_FLAG_IDA)
- cda = *((char **)phys_to_virt(ccw->cda));
+ cda = *((char **)dma32_to_virt(ccw->cda));
else
- cda = phys_to_virt(ccw->cda);
+ cda = dma32_to_virt(ccw->cda);
if (dst != cda) {
if (rq_data_dir(req) == READ)
memcpy(dst, cda, bv.bv_len);
@@ -660,30 +655,27 @@ static void
dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
struct irb *irb)
{
- char *page;
struct ccw1 *act, *end, *last;
int len, sl, sct, count;
+ struct device *dev;
+ char *page;
+
+ dev = &device->cdev->dev;
page = (char *) get_zeroed_page(GFP_ATOMIC);
if (page == NULL) {
DBF_DEV_EVENT(DBF_WARNING, device, "%s",
- "No memory to dump sense data");
+ "No memory to dump sense data");
return;
}
- len = sprintf(page, PRINTK_HEADER
- " I/O status report for device %s:\n",
- dev_name(&device->cdev->dev));
- len += sprintf(page + len, PRINTK_HEADER
- " in req: %p CS: 0x%02X DS: 0x%02X\n", req,
- irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
- len += sprintf(page + len, PRINTK_HEADER
- " device %s: Failing CCW: %p\n",
- dev_name(&device->cdev->dev),
- (void *) (addr_t) irb->scsw.cmd.cpa);
+ len = sprintf(page, "I/O status report:\n");
+ len += sprintf(page + len, "in req: %px CS: 0x%02X DS: 0x%02X\n",
+ req, irb->scsw.cmd.cstat, irb->scsw.cmd.dstat);
+ len += sprintf(page + len, "Failing CCW: %px\n",
+ (void *)(u64)dma32_to_u32(irb->scsw.cmd.cpa));
if (irb->esw.esw0.erw.cons) {
for (sl = 0; sl < 4; sl++) {
- len += sprintf(page + len, PRINTK_HEADER
- " Sense(hex) %2d-%2d:",
+ len += sprintf(page + len, "Sense(hex) %2d-%2d:",
(8 * sl), ((8 * sl) + 7));
for (sct = 0; sct < 8; sct++) {
@@ -693,47 +685,43 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
len += sprintf(page + len, "\n");
}
} else {
- len += sprintf(page + len, PRINTK_HEADER
- " SORRY - NO VALID SENSE AVAILABLE\n");
+ len += sprintf(page + len, "SORRY - NO VALID SENSE AVAILABLE\n");
}
- printk(KERN_ERR "%s", page);
+ dev_err(dev, "%s", page);
/* dump the Channel Program */
/* print first CCWs (maximum 8) */
act = req->cpaddr;
- for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
+ for (last = act; last->flags & (CCW_FLAG_CC | CCW_FLAG_DC); last++);
end = min(act + 8, last);
- len = sprintf(page, PRINTK_HEADER " Related CP in req: %p\n", req);
+ len = sprintf(page, "Related CP in req: %px\n", req);
while (act <= end) {
- len += sprintf(page + len, PRINTK_HEADER
- " CCW %p: %08X %08X DAT:",
+ len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
- ((int *) (addr_t) act->cda)
+ ((int *)dma32_to_virt(act->cda))
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
}
- printk(KERN_ERR "%s", page);
-
+ dev_err(dev, "%s", page);
/* print failing CCW area */
len = 0;
- if (act < ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2) {
- act = ((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa) - 2;
- len += sprintf(page + len, PRINTK_HEADER "......\n");
+ if (act < ((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa)) - 2) {
+ act = ((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa)) - 2;
+ len += sprintf(page + len, "......\n");
}
- end = min((struct ccw1 *)(addr_t) irb->scsw.cmd.cpa + 2, last);
+ end = min((struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa) + 2, last);
while (act <= end) {
- len += sprintf(page + len, PRINTK_HEADER
- " CCW %p: %08X %08X DAT:",
+ len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
- ((int *) (addr_t) act->cda)
+ ((int *)dma32_to_virt(act->cda))
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
@@ -742,54 +730,27 @@ dasd_fba_dump_sense(struct dasd_device *device, struct dasd_ccw_req * req,
/* print last CCWs */
if (act < last - 2) {
act = last - 2;
- len += sprintf(page + len, PRINTK_HEADER "......\n");
+ len += sprintf(page + len, "......\n");
}
while (act <= last) {
- len += sprintf(page + len, PRINTK_HEADER
- " CCW %p: %08X %08X DAT:",
+ len += sprintf(page + len, "CCW %px: %08X %08X DAT:",
act, ((int *) act)[0], ((int *) act)[1]);
for (count = 0; count < 32 && count < act->count;
count += sizeof(int))
len += sprintf(page + len, " %08X",
- ((int *) (addr_t) act->cda)
+ ((int *)dma32_to_virt(act->cda))
[(count>>2)]);
len += sprintf(page + len, "\n");
act++;
}
if (len > 0)
- printk(KERN_ERR "%s", page);
+ dev_err(dev, "%s", page);
free_page((unsigned long) page);
}
-/*
- * Initialize block layer request queue.
- */
-static void dasd_fba_setup_blk_queue(struct dasd_block *block)
+static unsigned int dasd_fba_max_sectors(struct dasd_block *block)
{
- unsigned int logical_block_size = block->bp_block;
- struct request_queue *q = block->gdp->queue;
- unsigned int max_bytes, max_discard_sectors;
- int max;
-
- max = DASD_FBA_MAX_BLOCKS << block->s2b_shift;
- blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
- q->limits.max_dev_sectors = max;
- blk_queue_logical_block_size(q, logical_block_size);
- blk_queue_max_hw_sectors(q, max);
- blk_queue_max_segments(q, USHRT_MAX);
- /* With page sized segments each segment can be translated into one idaw/tidaw */
- blk_queue_max_segment_size(q, PAGE_SIZE);
- blk_queue_segment_boundary(q, PAGE_SIZE - 1);
-
- q->limits.discard_granularity = logical_block_size;
-
- /* Calculate max_discard_sectors and make it PAGE aligned */
- max_bytes = USHRT_MAX * logical_block_size;
- max_bytes = ALIGN_DOWN(max_bytes, PAGE_SIZE);
- max_discard_sectors = max_bytes / logical_block_size;
-
- blk_queue_max_discard_sectors(q, max_discard_sectors);
- blk_queue_max_write_zeroes_sectors(q, max_discard_sectors);
+ return DASD_FBA_MAX_BLOCKS << block->s2b_shift;
}
static int dasd_fba_pe_handler(struct dasd_device *device,
@@ -802,10 +763,11 @@ static struct dasd_discipline dasd_fba_discipline = {
.owner = THIS_MODULE,
.name = "FBA ",
.ebcname = "FBA ",
+ .has_discard = true,
.check_device = dasd_fba_check_characteristics,
.do_analysis = dasd_fba_do_analysis,
.pe_handler = dasd_fba_pe_handler,
- .setup_blk_queue = dasd_fba_setup_blk_queue,
+ .max_sectors = dasd_fba_max_sectors,
.fill_geometry = dasd_fba_fill_geometry,
.start_IO = dasd_start_IO,
.term_IO = dasd_term_IO,
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 55e3abe94c..4533dd055c 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -11,8 +11,6 @@
*
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/interrupt.h>
#include <linux/major.h>
#include <linux/fs.h>
@@ -20,9 +18,6 @@
#include <linux/uaccess.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd_gendisk:"
-
#include "dasd_int.h"
static unsigned int queue_depth = 32;
@@ -39,6 +34,16 @@ MODULE_PARM_DESC(nr_hw_queues, "Default number of hardware queues for new DASD d
*/
int dasd_gendisk_alloc(struct dasd_block *block)
{
+ struct queue_limits lim = {
+ /*
+ * With page sized segments, each segment can be translated into
+ * one idaw/tidaw.
+ */
+ .max_segment_size = PAGE_SIZE,
+ .seg_boundary_mask = PAGE_SIZE - 1,
+ .dma_alignment = PAGE_SIZE - 1,
+ .max_segments = USHRT_MAX,
+ };
struct gendisk *gdp;
struct dasd_device *base;
int len, rc;
@@ -58,11 +63,12 @@ int dasd_gendisk_alloc(struct dasd_block *block)
if (rc)
return rc;
- gdp = blk_mq_alloc_disk(&block->tag_set, block);
+ gdp = blk_mq_alloc_disk(&block->tag_set, &lim, block);
if (IS_ERR(gdp)) {
blk_mq_free_tag_set(&block->tag_set);
return PTR_ERR(gdp);
}
+ blk_queue_flag_set(QUEUE_FLAG_NONROT, gdp->queue);
/* Initialize gendisk structure. */
gdp->major = DASD_MAJOR;
@@ -127,15 +133,15 @@ void dasd_gendisk_free(struct dasd_block *block)
*/
int dasd_scan_partitions(struct dasd_block *block)
{
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
int rc;
- bdev_handle = bdev_open_by_dev(disk_devt(block->gdp), BLK_OPEN_READ,
+ bdev_file = bdev_file_open_by_dev(disk_devt(block->gdp), BLK_OPEN_READ,
NULL, NULL);
- if (IS_ERR(bdev_handle)) {
+ if (IS_ERR(bdev_file)) {
DBF_DEV_EVENT(DBF_ERR, block->base,
"scan partitions error, blkdev_get returned %ld",
- PTR_ERR(bdev_handle));
+ PTR_ERR(bdev_file));
return -ENODEV;
}
@@ -147,15 +153,15 @@ int dasd_scan_partitions(struct dasd_block *block)
"scan partitions error, rc %d", rc);
/*
- * Since the matching bdev_release() call to the
- * bdev_open_by_path() in this function is not called before
+ * Since the matching fput() call to the
+ * bdev_file_open_by_dev() in this function is not called before
* dasd_destroy_partitions the offline open_count limit needs to be
- * increased from 0 to 1. This is done by setting device->bdev_handle
+ * increased from 0 to 1. This is done by setting device->bdev_file
* (see dasd_generic_set_offline). As long as the partition detection
* is running no offline should be allowed. That is why the assignment
- * to block->bdev_handle is done AFTER the BLKRRPART ioctl.
+ * to block->bdev_file is done AFTER the BLKRRPART ioctl.
*/
- block->bdev_handle = bdev_handle;
+ block->bdev_file = bdev_file;
return 0;
}
@@ -165,21 +171,21 @@ int dasd_scan_partitions(struct dasd_block *block)
*/
void dasd_destroy_partitions(struct dasd_block *block)
{
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
/*
- * Get the bdev_handle pointer from the device structure and clear
- * device->bdev_handle to lower the offline open_count limit again.
+ * Get the bdev_file pointer from the device structure and clear
+ * device->bdev_file to lower the offline open_count limit again.
*/
- bdev_handle = block->bdev_handle;
- block->bdev_handle = NULL;
+ bdev_file = block->bdev_file;
+ block->bdev_file = NULL;
- mutex_lock(&bdev_handle->bdev->bd_disk->open_mutex);
- bdev_disk_changed(bdev_handle->bdev->bd_disk, true);
- mutex_unlock(&bdev_handle->bdev->bd_disk->open_mutex);
+ mutex_lock(&file_bdev(bdev_file)->bd_disk->open_mutex);
+ bdev_disk_changed(file_bdev(bdev_file)->bd_disk, true);
+ mutex_unlock(&file_bdev(bdev_file)->bd_disk->open_mutex);
/* Matching blkdev_put to the blkdev_get in dasd_scan_partitions. */
- bdev_release(bdev_handle);
+ fput(bdev_file);
}
int dasd_gendisk_init(void)
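
Aside (not part of the patch): the queue_limits block above pins max_segment_size, seg_boundary_mask and dma_alignment to the page size so that every segment of a request maps to exactly one idaw/tidaw. Below is a minimal standalone sketch of the resulting per-request idaw count; the 4 KiB page size and the request lengths are example values only.

/* Standalone sketch, not part of the patch: with max_segment_size and
 * dma_alignment limited to the page size, a page-aligned request of len
 * bytes needs one idaw/tidaw per page.  The 4 KiB page size and the
 * request lengths below are assumed example values.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE 4096u

static unsigned int idaws_needed(unsigned int len)
{
	/* round up to cover a partial trailing page */
	return (len + EXAMPLE_PAGE_SIZE - 1) / EXAMPLE_PAGE_SIZE;
}

int main(void)
{
	printf("64 KiB request -> %u idaws\n", idaws_needed(64u * 1024));
	printf(" 4 KiB request -> %u idaws\n", idaws_needed(4u * 1024));
	return 0;
}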
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 1b1b8a41c4..e5f40536b4 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -113,9 +113,6 @@ do { \
__dev_id.ssid, __dev_id.devno, d_data); \
} while (0)
-/* limit size for an errorstring */
-#define ERRORLENGTH 30
-
/* definition of dbf debug levels */
#define DBF_EMERG 0 /* system is unusable */
#define DBF_ALERT 1 /* action must be taken immediately */
@@ -126,32 +123,6 @@ do { \
#define DBF_INFO 6 /* informational */
#define DBF_DEBUG 6 /* debug-level messages */
-/* messages to be written via klogd and dbf */
-#define DEV_MESSAGE(d_loglevel,d_device,d_string,d_args...)\
-do { \
- printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
- dev_name(&d_device->cdev->dev), d_args); \
- DBF_DEV_EVENT(DBF_ALERT, d_device, d_string, d_args); \
-} while(0)
-
-#define MESSAGE(d_loglevel,d_string,d_args...)\
-do { \
- printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
- DBF_EVENT(DBF_ALERT, d_string, d_args); \
-} while(0)
-
-/* messages to be written via klogd only */
-#define DEV_MESSAGE_LOG(d_loglevel,d_device,d_string,d_args...)\
-do { \
- printk(d_loglevel PRINTK_HEADER " %s: " d_string "\n", \
- dev_name(&d_device->cdev->dev), d_args); \
-} while(0)
-
-#define MESSAGE_LOG(d_loglevel,d_string,d_args...)\
-do { \
- printk(d_loglevel PRINTK_HEADER " " d_string "\n", d_args); \
-} while(0)
-
/* Macro to calculate number of blocks per page */
#define BLOCKS_PER_PAGE(blksize) (PAGE_SIZE / blksize)
@@ -322,6 +293,7 @@ struct dasd_discipline {
struct module *owner;
char ebcname[8]; /* a name used for tagging and printks */
char name[8]; /* a name used for tagging and printks */
+ bool has_discard;
struct list_head list; /* used for list of disciplines */
@@ -360,10 +332,7 @@ struct dasd_discipline {
int (*online_to_ready) (struct dasd_device *);
int (*basic_to_known)(struct dasd_device *);
- /*
- * Initialize block layer request queue.
- */
- void (*setup_blk_queue)(struct dasd_block *);
+ unsigned int (*max_sectors)(struct dasd_block *);
/* (struct dasd_device *);
* Device operation functions. build_cp creates a ccw chain for
* a block device request, start_io starts the request and
@@ -650,7 +619,7 @@ struct dasd_block {
struct gendisk *gdp;
spinlock_t request_queue_lock;
struct blk_mq_tag_set tag_set;
- struct bdev_handle *bdev_handle;
+ struct file *bdev_file;
atomic_t open_count;
unsigned long blocks; /* size of volume in blocks */
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index 61b9675e2a..7e0ed7032f 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -10,8 +10,6 @@
* i/o controls for the dasd driver.
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/interrupt.h>
#include <linux/compat.h>
#include <linux/major.h>
@@ -24,12 +22,8 @@
#include <linux/uaccess.h>
#include <linux/dasd_mod.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd_ioctl:"
-
#include "dasd_int.h"
-
static int
dasd_ioctl_api_version(void __user *argp)
{
@@ -537,7 +531,7 @@ static int __dasd_ioctl_information(struct dasd_block *block,
* This must be hidden from user-space.
*/
dasd_info->open_count = atomic_read(&block->open_count);
- if (!block->bdev_handle)
+ if (!block->bdev_file)
dasd_info->open_count++;
/*
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 62a859ea67..0faaa437d9 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -11,8 +11,6 @@
*
*/
-#define KMSG_COMPONENT "dasd"
-
#include <linux/ctype.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -23,9 +21,6 @@
#include <asm/debug.h>
#include <linux/uaccess.h>
-/* This is ugly... */
-#define PRINTK_HEADER "dasd_proc:"
-
#include "dasd_int.h"
static struct proc_dir_entry *dasd_proc_root_entry = NULL;
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 4b7ecd4fd4..6d1689a271 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -546,9 +546,13 @@ static const struct attribute_group *dcssblk_dev_attr_groups[] = {
static ssize_t
dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
{
+ struct queue_limits lim = {
+ .logical_block_size = 4096,
+ };
int rc, i, j, num_of_segments;
struct dcssblk_dev_info *dev_info;
struct segment_info *seg_info, *temp;
+ struct dax_device *dax_dev;
char *local_buf;
unsigned long seg_byte_size;
@@ -629,9 +633,9 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->dev.release = dcssblk_release_segment;
dev_info->dev.groups = dcssblk_dev_attr_groups;
INIT_LIST_HEAD(&dev_info->lh);
- dev_info->gd = blk_alloc_disk(NUMA_NO_NODE);
- if (dev_info->gd == NULL) {
- rc = -ENOMEM;
+ dev_info->gd = blk_alloc_disk(&lim, NUMA_NO_NODE);
+ if (IS_ERR(dev_info->gd)) {
+ rc = PTR_ERR(dev_info->gd);
goto seg_list_del;
}
dev_info->gd->major = dcssblk_major;
@@ -639,7 +643,6 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
dev_info->gd->fops = &dcssblk_devops;
dev_info->gd->private_data = dev_info;
dev_info->gd->flags |= GENHD_FL_NO_PART;
- blk_queue_logical_block_size(dev_info->gd->queue, 4096);
blk_queue_flag_set(QUEUE_FLAG_DAX, dev_info->gd->queue);
seg_byte_size = (dev_info->end - dev_info->start + 1);
@@ -677,13 +680,13 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
if (rc)
goto put_dev;
- dev_info->dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
- if (IS_ERR(dev_info->dax_dev)) {
- rc = PTR_ERR(dev_info->dax_dev);
- dev_info->dax_dev = NULL;
+ dax_dev = alloc_dax(dev_info, &dcssblk_dax_ops);
+ if (IS_ERR(dax_dev)) {
+ rc = PTR_ERR(dax_dev);
goto put_dev;
}
- set_dax_synchronous(dev_info->dax_dev);
+ set_dax_synchronous(dax_dev);
+ dev_info->dax_dev = dax_dev;
rc = dax_add_host(dev_info->dax_dev, dev_info->gd);
if (rc)
goto out_dax;
@@ -917,7 +920,7 @@ __dcssblk_direct_access(struct dcssblk_dev_info *dev_info, pgoff_t pgoff,
dev_sz = dev_info->end - dev_info->start + 1;
if (kaddr)
- *kaddr = (void *) dev_info->start + offset;
+ *kaddr = __va(dev_info->start + offset);
if (pfn)
*pfn = __pfn_to_pfn_t(PFN_DOWN(dev_info->start + offset),
PFN_DEV|PFN_SPECIAL);
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index ade95e91b3..1d456a5a3b 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -131,7 +131,7 @@ static void scm_request_done(struct scm_request *scmrq)
for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
msb = &scmrq->aob->msb[i];
- aidaw = (u64)phys_to_virt(msb->data_addr);
+ aidaw = (u64)dma64_to_virt(msb->data_addr);
if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
IS_ALIGNED(aidaw, PAGE_SIZE))
@@ -196,12 +196,12 @@ static int scm_request_prepare(struct scm_request *scmrq)
msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
msb->flags |= MSB_FLAG_IDA;
- msb->data_addr = (u64)virt_to_phys(aidaw);
+ msb->data_addr = virt_to_dma64(aidaw);
rq_for_each_segment(bv, req, iter) {
WARN_ON(bv.bv_offset);
msb->blk_count += bv.bv_len >> 12;
- aidaw->data_addr = virt_to_phys(page_address(bv.bv_page));
+ aidaw->data_addr = virt_to_dma64(page_address(bv.bv_page));
aidaw++;
}
@@ -435,10 +435,17 @@ static const struct blk_mq_ops scm_mq_ops = {
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
- unsigned int devindex, nr_max_blk;
+ struct queue_limits lim = {
+ .logical_block_size = 1 << 12,
+ };
+ unsigned int devindex;
struct request_queue *rq;
int len, ret;
+ lim.max_segments = min(scmdev->nr_max_block,
+ (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
+ lim.max_hw_sectors = lim.max_segments << 3; /* 8 * 512 = blk_size */
+
devindex = atomic_inc_return(&nr_devices) - 1;
/* scma..scmz + scmaa..scmzz */
if (devindex > 701) {
@@ -462,18 +469,12 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
if (ret)
goto out;
- bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, scmdev);
+ bdev->gendisk = blk_mq_alloc_disk(&bdev->tag_set, &lim, scmdev);
if (IS_ERR(bdev->gendisk)) {
ret = PTR_ERR(bdev->gendisk);
goto out_tag;
}
rq = bdev->rq = bdev->gendisk->queue;
- nr_max_blk = min(scmdev->nr_max_block,
- (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));
-
- blk_queue_logical_block_size(rq, 1 << 12);
- blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
- blk_queue_max_segments(rq, nr_max_blk);
blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);
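
Aside (not part of the patch): the scm_blk limits are now computed before the disk is allocated, and the << 3 shift encodes that one 4 KiB logical block equals eight 512-byte sectors. A standalone sketch of the same arithmetic follows; nr_max_block and the aidaw size are assumed example values.

/* Standalone sketch, not part of the patch: reproduces the limit
 * computation from scm_blk_dev_setup().  The device's nr_max_block and
 * the 16-byte aidaw size are assumptions used only for illustration.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SIZE	4096u
#define EXAMPLE_AIDAW_SIZE	16u	/* assumed sizeof(struct aidaw) */

int main(void)
{
	unsigned int nr_max_block = 64;		/* assumed device value */
	unsigned int per_page = EXAMPLE_PAGE_SIZE / EXAMPLE_AIDAW_SIZE;
	unsigned int max_segments = nr_max_block < per_page ? nr_max_block : per_page;
	unsigned int max_hw_sectors = max_segments << 3;	/* 4096 B block = 8 * 512 B sectors */

	printf("max_segments=%u max_hw_sectors=%u (%u KiB)\n",
	       max_segments, max_hw_sectors, max_hw_sectors * 512 / 1024);
	return 0;
}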
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 0b0324fe4a..dcb3c32f02 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -159,7 +159,7 @@ static void raw3215_mk_read_req(struct raw3215_info *raw)
ccw->cmd_code = 0x0A; /* read inquiry */
ccw->flags = 0x20; /* ignore incorrect length */
ccw->count = 160;
- ccw->cda = (__u32)__pa(raw->inbuf);
+ ccw->cda = virt_to_dma32(raw->inbuf);
}
/*
@@ -218,7 +218,7 @@ static void raw3215_mk_write_req(struct raw3215_info *raw)
ccw[-1].flags |= 0x40; /* use command chaining */
ccw->cmd_code = 0x01; /* write, auto carrier return */
ccw->flags = 0x20; /* ignore incorrect length ind. */
- ccw->cda = (__u32)__pa(raw->buffer + ix);
+ ccw->cda = virt_to_dma32(raw->buffer + ix);
count = len;
if (ix + count > RAW3215_BUFFER_SIZE)
count = RAW3215_BUFFER_SIZE - ix;
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 4f26b0a556..4d824f86bb 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -126,7 +126,7 @@ static int fs3270_activate(struct raw3270_view *view)
raw3270_request_set_cmd(fp->init, TC_EWRITEA);
raw3270_request_set_idal(fp->init, fp->rdbuf);
fp->init->rescnt = 0;
- cp = fp->rdbuf->data[0];
+ cp = dma64_to_virt(fp->rdbuf->data[0]);
if (fp->rdbuf_size == 0) {
/* No saved buffer. Just clear the screen. */
fp->init->ccw.count = 1;
@@ -164,7 +164,7 @@ static void fs3270_save_callback(struct raw3270_request *rq, void *data)
fp = (struct fs3270 *)rq->view;
/* Correct idal buffer element 0 address. */
- fp->rdbuf->data[0] -= 5;
+ fp->rdbuf->data[0] = dma64_add(fp->rdbuf->data[0], -5);
fp->rdbuf->size += 5;
/*
@@ -202,7 +202,7 @@ static void fs3270_deactivate(struct raw3270_view *view)
* room for the TW_KR/TO_SBA/<address>/<address>/TO_IC sequence
* in the activation command.
*/
- fp->rdbuf->data[0] += 5;
+ fp->rdbuf->data[0] = dma64_add(fp->rdbuf->data[0], 5);
fp->rdbuf->size -= 5;
raw3270_request_set_idal(fp->init, fp->rdbuf);
fp->init->rescnt = 0;
@@ -521,13 +521,13 @@ static const struct file_operations fs3270_fops = {
static void fs3270_create_cb(int minor)
{
__register_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub", &fs3270_fops);
- device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
+ device_create(&class3270, NULL, MKDEV(IBM_FS3270_MAJOR, minor),
NULL, "3270/tub%d", minor);
}
static void fs3270_destroy_cb(int minor)
{
- device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, minor));
+ device_destroy(&class3270, MKDEV(IBM_FS3270_MAJOR, minor));
__unregister_chrdev(IBM_FS3270_MAJOR, minor, 1, "tub");
}
@@ -546,7 +546,7 @@ static int __init fs3270_init(void)
rc = __register_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270", &fs3270_fops);
if (rc)
return rc;
- device_create(class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0),
+ device_create(&class3270, NULL, MKDEV(IBM_FS3270_MAJOR, 0),
NULL, "3270/tub");
raw3270_register_notifier(&fs3270_notifier);
return 0;
@@ -555,7 +555,7 @@ static int __init fs3270_init(void)
static void __exit fs3270_exit(void)
{
raw3270_unregister_notifier(&fs3270_notifier);
- device_destroy(class3270, MKDEV(IBM_FS3270_MAJOR, 0));
+ device_destroy(&class3270, MKDEV(IBM_FS3270_MAJOR, 0));
__unregister_chrdev(IBM_FS3270_MAJOR, 0, 1, "fs3270");
}
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 7115c0f856..c57694be9b 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -29,7 +29,9 @@
#include <linux/device.h>
#include <linux/mutex.h>
-struct class *class3270;
+const struct class class3270 = {
+ .name = "3270",
+};
EXPORT_SYMBOL(class3270);
/* The main 3270 data structure. */
@@ -160,7 +162,8 @@ struct raw3270_request *raw3270_request_alloc(size_t size)
/*
* Setup ccw.
*/
- rq->ccw.cda = __pa(rq->buffer);
+ if (rq->buffer)
+ rq->ccw.cda = virt_to_dma32(rq->buffer);
rq->ccw.flags = CCW_FLAG_SLI;
return rq;
@@ -186,7 +189,8 @@ int raw3270_request_reset(struct raw3270_request *rq)
return -EBUSY;
rq->ccw.cmd_code = 0;
rq->ccw.count = 0;
- rq->ccw.cda = __pa(rq->buffer);
+ if (rq->buffer)
+ rq->ccw.cda = virt_to_dma32(rq->buffer);
rq->ccw.flags = CCW_FLAG_SLI;
rq->rescnt = 0;
rq->rc = 0;
@@ -221,7 +225,7 @@ EXPORT_SYMBOL(raw3270_request_add_data);
*/
void raw3270_request_set_data(struct raw3270_request *rq, void *data, size_t size)
{
- rq->ccw.cda = __pa(data);
+ rq->ccw.cda = virt_to_dma32(data);
rq->ccw.count = size;
}
EXPORT_SYMBOL(raw3270_request_set_data);
@@ -231,7 +235,7 @@ EXPORT_SYMBOL(raw3270_request_set_data);
*/
void raw3270_request_set_idal(struct raw3270_request *rq, struct idal_buffer *ib)
{
- rq->ccw.cda = __pa(ib->data);
+ rq->ccw.cda = virt_to_dma32(ib->data);
rq->ccw.count = ib->size;
rq->ccw.flags |= CCW_FLAG_IDA;
}
@@ -577,7 +581,7 @@ static void raw3270_read_modified(struct raw3270 *rp)
rp->init_readmod.ccw.cmd_code = TC_READMOD;
rp->init_readmod.ccw.flags = CCW_FLAG_SLI;
rp->init_readmod.ccw.count = sizeof(rp->init_data);
- rp->init_readmod.ccw.cda = (__u32)__pa(rp->init_data);
+ rp->init_readmod.ccw.cda = virt_to_dma32(rp->init_data);
rp->init_readmod.callback = raw3270_read_modified_cb;
rp->init_readmod.callback_data = rp->init_data;
rp->state = RAW3270_STATE_READMOD;
@@ -597,7 +601,7 @@ static void raw3270_writesf_readpart(struct raw3270 *rp)
rp->init_readpart.ccw.cmd_code = TC_WRITESF;
rp->init_readpart.ccw.flags = CCW_FLAG_SLI;
rp->init_readpart.ccw.count = sizeof(wbuf);
- rp->init_readpart.ccw.cda = (__u32)__pa(&rp->init_data);
+ rp->init_readpart.ccw.cda = virt_to_dma32(&rp->init_data);
rp->state = RAW3270_STATE_W4ATTN;
raw3270_start_irq(&rp->init_view, &rp->init_readpart);
}
@@ -635,7 +639,7 @@ static int __raw3270_reset_device(struct raw3270 *rp)
rp->init_reset.ccw.cmd_code = TC_EWRITEA;
rp->init_reset.ccw.flags = CCW_FLAG_SLI;
rp->init_reset.ccw.count = 1;
- rp->init_reset.ccw.cda = (__u32)__pa(rp->init_data);
+ rp->init_reset.ccw.cda = virt_to_dma32(rp->init_data);
rp->init_reset.callback = raw3270_reset_device_cb;
rc = __raw3270_start(rp, &rp->init_view, &rp->init_reset);
if (rc == 0 && rp->state == RAW3270_STATE_INIT)
@@ -1316,23 +1320,25 @@ static int raw3270_init(void)
return 0;
raw3270_registered = 1;
rc = ccw_driver_register(&raw3270_ccw_driver);
- if (rc == 0) {
- /* Create attributes for early (= console) device. */
- mutex_lock(&raw3270_mutex);
- class3270 = class_create("3270");
- list_for_each_entry(rp, &raw3270_devices, list) {
- get_device(&rp->cdev->dev);
- raw3270_create_attributes(rp);
- }
- mutex_unlock(&raw3270_mutex);
+ if (rc)
+ return rc;
+ rc = class_register(&class3270);
+ if (rc)
+ return rc;
+ /* Create attributes for early (= console) device. */
+ mutex_lock(&raw3270_mutex);
+ list_for_each_entry(rp, &raw3270_devices, list) {
+ get_device(&rp->cdev->dev);
+ raw3270_create_attributes(rp);
}
- return rc;
+ mutex_unlock(&raw3270_mutex);
+ return 0;
}
static void raw3270_exit(void)
{
ccw_driver_unregister(&raw3270_ccw_driver);
- class_destroy(class3270);
+ class_unregister(&class3270);
}
MODULE_LICENSE("GPL");
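
Aside (not part of the patch): registering a statically defined const struct class, as raw3270_init() now does, replaces the ERR_PTR handling of class_create() with a plain integer return. The sketch below contrasts the two registration shapes; register_thing() and thing_create() are invented stand-ins, not kernel APIs, and in the kernel the dynamic variant reports errors through the returned pointer (ERR_PTR/PTR_ERR) rather than NULL.

/* Standalone sketch, not part of the patch: contrasts the two
 * registration shapes seen in raw3270_init().
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct thing { const char *name; };

/* new shape: caller owns a static const object, registration returns an int */
static int register_thing(const struct thing *t)
{
	return (t && t->name) ? 0 : -EINVAL;
}

/* old shape: allocate and register in one call, error reported via the pointer */
static struct thing *thing_create(const char *name)
{
	struct thing *t = malloc(sizeof(*t));

	if (t)
		t->name = name;
	return t;
}

static const struct thing example_class = { .name = "3270" };

int main(void)
{
	int rc = register_thing(&example_class);	/* single integer to check */
	struct thing *old = thing_create("3270");	/* pointer to check and free */

	printf("register_thing: rc=%d, thing_create: %s\n",
	       rc, old ? old->name : "(failed)");
	free(old);
	return 0;
}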
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h
index b1beecc7a0..5040c7e0e0 100644
--- a/drivers/s390/char/raw3270.h
+++ b/drivers/s390/char/raw3270.h
@@ -14,7 +14,7 @@
struct raw3270;
struct raw3270_view;
-extern struct class *class3270;
+extern const struct class class3270;
/* 3270 CCW request */
struct raw3270_request {
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 11c428f4c7..7815e9bea6 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -18,6 +18,7 @@
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/memory.h>
+#include <linux/memory_hotplug.h>
#include <linux/module.h>
#include <asm/ctlreg.h>
#include <asm/chpid.h>
@@ -26,6 +27,7 @@
#include <asm/sclp.h>
#include <asm/numa.h>
#include <asm/facility.h>
+#include <asm/page-states.h>
#include "sclp.h"
@@ -340,16 +342,38 @@ static int sclp_mem_notifier(struct notifier_block *nb,
if (contains_standby_increment(start, start + size))
rc = -EPERM;
break;
- case MEM_ONLINE:
- case MEM_CANCEL_OFFLINE:
- break;
- case MEM_GOING_ONLINE:
+ case MEM_PREPARE_ONLINE:
+ /*
+ * The altmap_start_pfn and altmap_nr_pages fields of struct
+ * memory_notify are only valid for the MEM_PREPARE_ONLINE and
+ * MEM_FINISH_OFFLINE notifiers.
+ *
+ * When altmap is in use, take the specified memory range
+ * online, which includes the altmap.
+ */
+ if (arg->altmap_nr_pages) {
+ start = PFN_PHYS(arg->altmap_start_pfn);
+ size += PFN_PHYS(arg->altmap_nr_pages);
+ }
rc = sclp_mem_change_state(start, size, 1);
+ if (rc || !arg->altmap_nr_pages)
+ break;
+ /*
+ * Set CMMA state to nodat here, since the struct page memory
+ * at the beginning of the memory block will not go through the
+ * buddy allocator later.
+ */
+ __arch_set_page_nodat((void *)__va(start), arg->altmap_nr_pages);
break;
- case MEM_CANCEL_ONLINE:
- sclp_mem_change_state(start, size, 0);
- break;
- case MEM_OFFLINE:
+ case MEM_FINISH_OFFLINE:
+ /*
+ * When altmap is in use, take the specified memory range
+ * offline, which includes the altmap.
+ */
+ if (arg->altmap_nr_pages) {
+ start = PFN_PHYS(arg->altmap_start_pfn);
+ size += PFN_PHYS(arg->altmap_nr_pages);
+ }
sclp_mem_change_state(start, size, 0);
break;
default:
@@ -400,7 +424,9 @@ static void __init add_memory_merged(u16 rn)
if (!size)
goto skip_add;
for (addr = start; addr < start + size; addr += block_size)
- add_memory(0, addr, block_size, MHP_NONE);
+ add_memory(0, addr, block_size,
+ MACHINE_HAS_EDAT1 ?
+ MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
skip_add:
first_rn = rn;
num = 1;
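
Aside (not part of the patch): the MEM_PREPARE_ONLINE/MEM_FINISH_OFFLINE branches above widen the range handed to sclp_mem_change_state() so that the altmap pages at the start of the block are covered as well. A standalone sketch of that PFN_PHYS arithmetic follows; the block size, start pfn and altmap page count are all assumed example values, and PFN_PHYS() is mimicked as a shift by 12 for a 4 KiB page size.

/* Standalone sketch, not part of the patch: shows how the notifier range
 * is widened to include the altmap.  All numbers are invented examples.
 */
#include <stdio.h>

#define EXAMPLE_PAGE_SHIFT 12

static unsigned long long pfn_phys(unsigned long pfn)
{
	return (unsigned long long)pfn << EXAMPLE_PAGE_SHIFT;
}

int main(void)
{
	/* assumed example: 256 MiB block; its first 1024 pages back the altmap */
	unsigned long altmap_start_pfn = 0x100000;	/* block starts at 4 GiB */
	unsigned long altmap_nr_pages  = 1024;		/* 4 MiB of struct pages */
	unsigned long long start = pfn_phys(altmap_start_pfn + altmap_nr_pages);
	unsigned long long size  = (256ULL << 20) - pfn_phys(altmap_nr_pages);

	/* widen the range exactly as the MEM_PREPARE_ONLINE branch does */
	if (altmap_nr_pages) {
		start = pfn_phys(altmap_start_pfn);
		size += pfn_phys(altmap_nr_pages);
	}
	printf("sclp state change covers %#llx .. %#llx\n", start, start + size);
	return 0;
}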
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index 4e5d5efa97..0aba30efb4 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -305,7 +305,9 @@ tape_ccw_cc(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
ccw->cmd_code = cmd_code;
ccw->flags = CCW_FLAG_CC;
ccw->count = memsize;
- ccw->cda = (__u32)(addr_t) cda;
+ ccw->cda = 0;
+ if (cda)
+ ccw->cda = virt_to_dma32(cda);
return ccw + 1;
}
@@ -315,7 +317,9 @@ tape_ccw_end(struct ccw1 *ccw, __u8 cmd_code, __u16 memsize, void *cda)
ccw->cmd_code = cmd_code;
ccw->flags = 0;
ccw->count = memsize;
- ccw->cda = (__u32)(addr_t) cda;
+ ccw->cda = 0;
+ if (cda)
+ ccw->cda = virt_to_dma32(cda);
return ccw + 1;
}
@@ -325,7 +329,7 @@ tape_ccw_cmd(struct ccw1 *ccw, __u8 cmd_code)
ccw->cmd_code = cmd_code;
ccw->flags = 0;
ccw->count = 0;
- ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
+ ccw->cda = virt_to_dma32(&ccw->cmd_code);
return ccw + 1;
}
@@ -336,7 +340,7 @@ tape_ccw_repeat(struct ccw1 *ccw, __u8 cmd_code, int count)
ccw->cmd_code = cmd_code;
ccw->flags = CCW_FLAG_CC;
ccw->count = 0;
- ccw->cda = (__u32)(addr_t) &ccw->cmd_code;
+ ccw->cda = virt_to_dma32(&ccw->cmd_code);
ccw++;
}
return ccw;
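
Aside (not part of the patch): throughout this series, raw (__u32)(addr_t) casts into ccw->cda give way to virt_to_dma32(), whose distinct return type lets static checkers flag accidental mixing of virtual addresses and 31-bit channel-program addresses. A rough userspace approximation is sketched below; the kernel's dma32_t is a sparse-checked __bitwise typedef, mimicked here with a wrapper struct so that a plain compiler enforces the separation, and all helper and struct names are local stand-ins.

/* Standalone sketch, not part of the patch: a wrapper struct stands in for
 * the kernel's dma32_t so that assigning a plain pointer or integer to the
 * cda field is rejected at compile time.
 */
#include <stdint.h>
#include <stdio.h>

typedef struct { uint32_t value; } dma32_t;	/* stand-in for the kernel type */

static dma32_t virt_to_dma32_sketch(void *addr)
{
	/* assumes an identity mapping below 2 GiB, as format-0/1 CCWs require */
	return (dma32_t){ .value = (uint32_t)(uintptr_t)addr };
}

static void *dma32_to_virt_sketch(dma32_t addr)
{
	return (void *)(uintptr_t)addr.value;
}

struct ccw1_sketch {				/* minimal stand-in for struct ccw1 */
	uint8_t  cmd_code;
	uint8_t  flags;
	uint16_t count;
	dma32_t  cda;
};

int main(void)
{
	static char inbuf[160];
	struct ccw1_sketch ccw = { .cmd_code = 0x0a, .count = sizeof(inbuf) };

	ccw.cda = virt_to_dma32_sketch(inbuf);
	/* ccw.cda = (uint32_t)(uintptr_t)inbuf;  <- the old-style cast no longer compiles */
	printf("cda=%#x data=%p\n", ccw.cda.value, dma32_to_virt_sketch(ccw.cda));
	return 0;
}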
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index 277a0f903d..eae362bbfb 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -22,7 +22,9 @@ MODULE_DESCRIPTION(
);
MODULE_LICENSE("GPL");
-static struct class *tape_class;
+static const struct class tape_class = {
+ .name = "tape390",
+};
/*
* Register a tape device and return a pointer to the cdev structure.
@@ -74,7 +76,7 @@ struct tape_class_device *register_tape_dev(
if (rc)
goto fail_with_cdev;
- tcd->class_device = device_create(tape_class, device,
+ tcd->class_device = device_create(&tape_class, device,
tcd->char_device->dev, NULL,
"%s", tcd->device_name);
rc = PTR_ERR_OR_ZERO(tcd->class_device);
@@ -91,7 +93,7 @@ struct tape_class_device *register_tape_dev(
return tcd;
fail_with_class_device:
- device_destroy(tape_class, tcd->char_device->dev);
+ device_destroy(&tape_class, tcd->char_device->dev);
fail_with_cdev:
cdev_del(tcd->char_device);
@@ -107,7 +109,7 @@ void unregister_tape_dev(struct device *device, struct tape_class_device *tcd)
{
if (tcd != NULL && !IS_ERR(tcd)) {
sysfs_remove_link(&device->kobj, tcd->mode_name);
- device_destroy(tape_class, tcd->char_device->dev);
+ device_destroy(&tape_class, tcd->char_device->dev);
cdev_del(tcd->char_device);
kfree(tcd);
}
@@ -117,15 +119,12 @@ EXPORT_SYMBOL(unregister_tape_dev);
static int __init tape_init(void)
{
- tape_class = class_create("tape390");
-
- return 0;
+ return class_register(&tape_class);
}
static void __exit tape_exit(void)
{
- class_destroy(tape_class);
- tape_class = NULL;
+ class_unregister(&tape_class);
}
postcore_initcall(tape_init);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 6946ba9a9d..d7e408c8d0 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -679,7 +679,9 @@ static const struct attribute_group *vmlogrdr_attr_groups[] = {
NULL,
};
-static struct class *vmlogrdr_class;
+static const struct class vmlogrdr_class = {
+ .name = "vmlogrdr_class",
+};
static struct device_driver vmlogrdr_driver = {
.name = "vmlogrdr",
.bus = &iucv_bus,
@@ -699,12 +701,9 @@ static int vmlogrdr_register_driver(void)
if (ret)
goto out_iucv;
- vmlogrdr_class = class_create("vmlogrdr");
- if (IS_ERR(vmlogrdr_class)) {
- ret = PTR_ERR(vmlogrdr_class);
- vmlogrdr_class = NULL;
+ ret = class_register(&vmlogrdr_class);
+ if (ret)
goto out_driver;
- }
return 0;
out_driver:
@@ -718,8 +717,7 @@ out:
static void vmlogrdr_unregister_driver(void)
{
- class_destroy(vmlogrdr_class);
- vmlogrdr_class = NULL;
+ class_unregister(&vmlogrdr_class);
driver_unregister(&vmlogrdr_driver);
iucv_unregister(&vmlogrdr_iucv_handler, 1);
}
@@ -754,7 +752,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
return ret;
}
- priv->class_device = device_create(vmlogrdr_class, dev,
+ priv->class_device = device_create(&vmlogrdr_class, dev,
MKDEV(vmlogrdr_major,
priv->minor_num),
priv, "%s", dev_name(dev));
@@ -771,7 +769,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
static int vmlogrdr_unregister_device(struct vmlogrdr_priv_t *priv)
{
- device_destroy(vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
+ device_destroy(&vmlogrdr_class, MKDEV(vmlogrdr_major, priv->minor_num));
if (priv->device != NULL) {
device_unregister(priv->device);
priv->device=NULL;
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 82efdd20ad..fe94dec427 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -48,7 +48,9 @@ MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver");
MODULE_LICENSE("GPL");
static dev_t ur_first_dev_maj_min;
-static struct class *vmur_class;
+static const struct class vmur_class = {
+ .name = "vmur",
+};
static struct debug_info *vmur_dbf;
/* We put the device's record length (for writes) in the driver_info field */
@@ -195,7 +197,7 @@ static void free_chan_prog(struct ccw1 *cpa)
struct ccw1 *ptr = cpa;
while (ptr->cda) {
- kfree((void *)(addr_t) ptr->cda);
+ kfree(dma32_to_virt(ptr->cda));
ptr++;
}
kfree(cpa);
@@ -237,7 +239,7 @@ static struct ccw1 *alloc_chan_prog(const char __user *ubuf, int rec_count,
free_chan_prog(cpa);
return ERR_PTR(-ENOMEM);
}
- cpa[i].cda = (u32)(addr_t) kbuf;
+ cpa[i].cda = virt_to_dma32(kbuf);
if (copy_from_user(kbuf, ubuf, reclen)) {
free_chan_prog(cpa);
return ERR_PTR(-EFAULT);
@@ -912,7 +914,7 @@ static int ur_set_online(struct ccw_device *cdev)
goto fail_free_cdev;
}
- urd->device = device_create(vmur_class, &cdev->dev,
+ urd->device = device_create(&vmur_class, &cdev->dev,
urd->char_device->dev, NULL, "%s", node_id);
if (IS_ERR(urd->device)) {
rc = PTR_ERR(urd->device);
@@ -958,7 +960,7 @@ static int ur_set_offline_force(struct ccw_device *cdev, int force)
/* Work not run yet - need to release reference here */
urdev_put(urd);
}
- device_destroy(vmur_class, urd->char_device->dev);
+ device_destroy(&vmur_class, urd->char_device->dev);
cdev_del(urd->char_device);
urd->char_device = NULL;
rc = 0;
@@ -1022,11 +1024,9 @@ static int __init ur_init(void)
debug_set_level(vmur_dbf, 6);
- vmur_class = class_create("vmur");
- if (IS_ERR(vmur_class)) {
- rc = PTR_ERR(vmur_class);
+ rc = class_register(&vmur_class);
+ if (rc)
goto fail_free_dbf;
- }
rc = ccw_driver_register(&ur_driver);
if (rc)
@@ -1046,7 +1046,7 @@ static int __init ur_init(void)
fail_unregister_driver:
ccw_driver_unregister(&ur_driver);
fail_class_destroy:
- class_destroy(vmur_class);
+ class_unregister(&vmur_class);
fail_free_dbf:
debug_unregister(vmur_dbf);
return rc;
@@ -1056,7 +1056,7 @@ static void __exit ur_exit(void)
{
unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS);
ccw_driver_unregister(&ur_driver);
- class_destroy(vmur_class);
+ class_unregister(&vmur_class);
debug_unregister(vmur_dbf);
pr_info("%s unloaded.\n", ur_banner);
}
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c
index bc3be0330f..0969fa01df 100644
--- a/drivers/s390/char/zcore.c
+++ b/drivers/s390/char/zcore.c
@@ -29,7 +29,6 @@
#include <asm/irqflags.h>
#include <asm/checksum.h>
#include <asm/os_info.h>
-#include <asm/switch_to.h>
#include <asm/maccess.h>
#include "sclp.h"
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index aa3292e57e..b72f672a77 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -31,7 +31,7 @@
* to devices that use multiple subchannels.
*/
-static struct bus_type ccwgroup_bus_type;
+static const struct bus_type ccwgroup_bus_type;
static void __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev)
{
@@ -240,7 +240,7 @@ static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj,
&gdev->dev.kobj, "group_device");
if (rc) {
- for (--i; i >= 0; i--)
+ while (i--)
sysfs_remove_link(&gdev->cdev[i]->dev.kobj,
"group_device");
return rc;
@@ -251,7 +251,7 @@ static int __ccwgroup_create_symlinks(struct ccwgroup_device *gdev)
rc = sysfs_create_link(&gdev->dev.kobj,
&gdev->cdev[i]->dev.kobj, str);
if (rc) {
- for (--i; i >= 0; i--) {
+ while (i--) {
sprintf(str, "cdev%d", i);
sysfs_remove_link(&gdev->dev.kobj, str);
}
@@ -465,7 +465,7 @@ static void ccwgroup_shutdown(struct device *dev)
gdrv->shutdown(gdev);
}
-static struct bus_type ccwgroup_bus_type = {
+static const struct bus_type ccwgroup_bus_type = {
.name = "ccwgroup",
.dev_groups = ccwgroup_dev_groups,
.remove = ccwgroup_remove,
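
Aside (not part of the patch): the symlink-unwind loops above change from for (--i; i >= 0; i--) to while (i--). Both revisit exactly the entries created before the failure, and the latter also stays correct if the counter is ever made unsigned. A trivial standalone sketch of the unwind order, with the failure index as an assumed example value:

/* Standalone sketch, not part of the patch: both unwind idioms visit
 * indices i-1 .. 0 after a failure at index i; while (i--) additionally
 * works with unsigned counters.
 */
#include <stdio.h>

int main(void)
{
	unsigned int created = 3;	/* assume the failure happened at index 3 */
	unsigned int i = created;

	while (i--)
		printf("undo %u\n", i);	/* prints 2, 1, 0 */
	return 0;
}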
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 64ed55c3ae..44ea76f9e1 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -191,7 +191,7 @@ EXPORT_SYMBOL_GPL(chsc_ssqd);
* Returns 0 on success.
*/
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
- u64 summary_indicator_addr, u64 subchannel_indicator_addr, u8 isc)
+ dma64_t summary_indicator_addr, dma64_t subchannel_indicator_addr, u8 isc)
{
memset(scssc, 0, sizeof(*scssc));
scssc->request.length = 0x0fe0;
@@ -844,7 +844,7 @@ chsc_add_cmg_attr(struct channel_subsystem *css)
}
return ret;
cleanup:
- for (--i; i >= 0; i--) {
+ while (i--) {
if (!css->chps[i])
continue;
chp_remove_cmg_attr(css->chps[i]);
@@ -861,9 +861,9 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
u32 key : 4;
u32 : 28;
u32 zeroes1;
- u32 cub_addr1;
+ dma32_t cub_addr1;
u32 zeroes2;
- u32 cub_addr2;
+ dma32_t cub_addr2;
u32 reserved[13];
struct chsc_header response;
u32 status : 8;
@@ -881,8 +881,8 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable)
secm_area->request.code = 0x0016;
secm_area->key = PAGE_DEFAULT_KEY >> 4;
- secm_area->cub_addr1 = virt_to_phys(css->cub_addr1);
- secm_area->cub_addr2 = virt_to_phys(css->cub_addr2);
+ secm_area->cub_addr1 = virt_to_dma32(css->cub_addr1);
+ secm_area->cub_addr2 = virt_to_dma32(css->cub_addr2);
secm_area->operation_code = enable ? 0 : 1;
@@ -1091,8 +1091,8 @@ int __init chsc_init(void)
{
int ret;
- sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
- chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sei_page = (void *)get_zeroed_page(GFP_KERNEL);
+ chsc_page = (void *)get_zeroed_page(GFP_KERNEL);
if (!sei_page || !chsc_page) {
ret = -ENOMEM;
goto out_err;
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index d1caacb08e..03602295f3 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -91,8 +91,8 @@ struct chsc_scssc_area {
u16:16;
u32:32;
u32:32;
- u64 summary_indicator_addr;
- u64 subchannel_indicator_addr;
+ dma64_t summary_indicator_addr;
+ dma64_t subchannel_indicator_addr;
u32 ks:4;
u32 kc:4;
u32:21;
@@ -164,7 +164,7 @@ void chsc_chp_offline(struct chp_id chpid);
int chsc_get_channel_measurement_chars(struct channel_path *chp);
int chsc_ssqd(struct subchannel_id schid, struct chsc_ssqd_area *ssqd);
int chsc_sadc(struct subchannel_id schid, struct chsc_scssc_area *scssc,
- u64 summary_indicator_addr, u64 subchannel_indicator_addr,
+ dma64_t summary_indicator_addr, dma64_t subchannel_indicator_addr,
u8 isc);
int chsc_sgib(u32 origin);
int chsc_error_from_response(int response);
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index 902237d0ba..e6c800653f 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -293,7 +293,7 @@ static int chsc_ioctl_start(void __user *user_area)
if (!css_general_characteristics.dynio)
/* It makes no sense to try. */
return -EOPNOTSUPP;
- chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+ chsc_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!chsc_area)
return -ENOMEM;
request = kzalloc(sizeof(*request), GFP_KERNEL);
@@ -341,7 +341,7 @@ static int chsc_ioctl_on_close_set(void __user *user_area)
ret = -ENOMEM;
goto out_unlock;
}
- on_close_chsc_area = (void *)get_zeroed_page(GFP_DMA | GFP_KERNEL);
+ on_close_chsc_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!on_close_chsc_area) {
ret = -ENOMEM;
goto out_free_request;
@@ -393,7 +393,7 @@ static int chsc_ioctl_start_sync(void __user *user_area)
struct chsc_sync_area *chsc_area;
int ret, ccode;
- chsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ chsc_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!chsc_area)
return -ENOMEM;
if (copy_from_user(chsc_area, user_area, PAGE_SIZE)) {
@@ -439,7 +439,7 @@ static int chsc_ioctl_info_channel_path(void __user *user_cd)
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *scpcd_area;
- scpcd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ scpcd_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!scpcd_area)
return -ENOMEM;
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
@@ -501,7 +501,7 @@ static int chsc_ioctl_info_cu(void __user *user_cd)
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *scucd_area;
- scucd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ scucd_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!scucd_area)
return -ENOMEM;
cd = kzalloc(sizeof(*cd), GFP_KERNEL);
@@ -564,7 +564,7 @@ static int chsc_ioctl_info_sch_cu(void __user *user_cud)
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *sscud_area;
- sscud_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sscud_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!sscud_area)
return -ENOMEM;
cud = kzalloc(sizeof(*cud), GFP_KERNEL);
@@ -626,7 +626,7 @@ static int chsc_ioctl_conf_info(void __user *user_ci)
u8 data[PAGE_SIZE - 20];
} __attribute__ ((packed)) *sci_area;
- sci_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sci_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!sci_area)
return -ENOMEM;
ci = kzalloc(sizeof(*ci), GFP_KERNEL);
@@ -697,7 +697,7 @@ static int chsc_ioctl_conf_comp_list(void __user *user_ccl)
u32 res;
} __attribute__ ((packed)) *cssids_parm;
- sccl_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sccl_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!sccl_area)
return -ENOMEM;
ccl = kzalloc(sizeof(*ccl), GFP_KERNEL);
@@ -757,7 +757,7 @@ static int chsc_ioctl_chpd(void __user *user_chpd)
int ret;
chpd = kzalloc(sizeof(*chpd), GFP_KERNEL);
- scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ scpd_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!scpd_area || !chpd) {
ret = -ENOMEM;
goto out_free;
@@ -797,7 +797,7 @@ static int chsc_ioctl_dcal(void __user *user_dcal)
u8 data[PAGE_SIZE - 36];
} __attribute__ ((packed)) *sdcal_area;
- sdcal_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sdcal_area = (void *)get_zeroed_page(GFP_KERNEL);
if (!sdcal_area)
return -ENOMEM;
dcal = kzalloc(sizeof(*dcal), GFP_KERNEL);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index a5736b7357..7e759c2148 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -148,7 +148,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
orb->cmd.i2k = 0;
orb->cmd.key = key >> 4;
/* issue "Start Subchannel" */
- orb->cmd.cpa = (u32)virt_to_phys(cpa);
+ orb->cmd.cpa = virt_to_dma32(cpa);
ccode = ssch(sch->schid, orb);
/* process condition code */
@@ -717,7 +717,7 @@ int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key)
orb->tm.key = key >> 4;
orb->tm.b = 1;
orb->tm.lpm = lpm ? lpm : sch->lpm;
- orb->tm.tcw = (u32)virt_to_phys(tcw);
+ orb->tm.tcw = virt_to_dma32(tcw);
cc = ssch(sch->schid, orb);
switch (cc) {
case 0:
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c
index 5584aa46c9..f80dc18e2a 100644
--- a/drivers/s390/cio/cmf.c
+++ b/drivers/s390/cio/cmf.c
@@ -169,7 +169,8 @@ static inline void cmf_activate(void *area, unsigned int onoff)
" lgr 2,%[mbo]\n"
" schm\n"
:
- : [r1] "d" ((unsigned long)onoff), [mbo] "d" (area)
+ : [r1] "d" ((unsigned long)onoff),
+ [mbo] "d" (virt_to_phys(area))
: "1", "2");
}
@@ -501,8 +502,7 @@ static int alloc_cmb(struct ccw_device *cdev)
WARN_ON(!list_empty(&cmb_area.list));
spin_unlock(&cmb_area.lock);
- mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
- get_order(size));
+ mem = (void *)__get_free_pages(GFP_KERNEL, get_order(size));
spin_lock(&cmb_area.lock);
if (cmb_area.mem) {
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 28a88ed2c3..1d68db1a3d 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -39,7 +39,7 @@ int max_ssid;
#define MAX_CSS_IDX 0
struct channel_subsystem *channel_subsystems[MAX_CSS_IDX + 1];
-static struct bus_type css_bus_type;
+static const struct bus_type css_bus_type;
int
for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
@@ -1114,26 +1114,33 @@ static int cio_dma_pool_init(void)
return 0;
}
-void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
- size_t size)
+void *__cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size, dma32_t *dma_handle)
{
dma_addr_t dma_addr;
- unsigned long addr;
size_t chunk_size;
+ void *addr;
if (!gp_dma)
return NULL;
- addr = gen_pool_alloc(gp_dma, size);
+ addr = gen_pool_dma_alloc(gp_dma, size, &dma_addr);
while (!addr) {
chunk_size = round_up(size, PAGE_SIZE);
- addr = (unsigned long) dma_alloc_coherent(dma_dev,
- chunk_size, &dma_addr, CIO_DMA_GFP);
+ addr = dma_alloc_coherent(dma_dev, chunk_size, &dma_addr, CIO_DMA_GFP);
if (!addr)
return NULL;
- gen_pool_add_virt(gp_dma, addr, dma_addr, chunk_size, -1);
- addr = gen_pool_alloc(gp_dma, size);
+ gen_pool_add_virt(gp_dma, (unsigned long)addr, dma_addr, chunk_size, -1);
+ addr = gen_pool_dma_alloc(gp_dma, size, dma_handle ? &dma_addr : NULL);
}
- return (void *) addr;
+ if (dma_handle)
+ *dma_handle = (__force dma32_t)dma_addr;
+ return addr;
+}
+
+void *cio_gp_dma_zalloc(struct gen_pool *gp_dma, struct device *dma_dev,
+ size_t size)
+{
+ return __cio_gp_dma_zalloc(gp_dma, dma_dev, size, NULL);
}
void cio_gp_dma_free(struct gen_pool *gp_dma, void *cpu_addr, size_t size)
@@ -1409,7 +1416,7 @@ static int css_uevent(const struct device *dev, struct kobj_uevent_env *env)
return ret;
}
-static struct bus_type css_bus_type = {
+static const struct bus_type css_bus_type = {
.name = "css",
.match = css_bus_match,
.probe = css_probe,
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 34b2567b8d..920f550bc3 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -49,7 +49,7 @@ static const unsigned long recovery_delay[] = { 3, 30, 300 };
static atomic_t ccw_device_init_count = ATOMIC_INIT(0);
static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq);
-static struct bus_type ccw_bus_type;
+static const struct bus_type ccw_bus_type;
/******************* bus type handling ***********************/
@@ -1779,7 +1779,7 @@ static void ccw_device_shutdown(struct device *dev)
__disable_cmf(cdev);
}
-static struct bus_type ccw_bus_type = {
+static const struct bus_type ccw_bus_type = {
.name = "ccw",
.match = ccw_bus_match,
.uevent = ccw_uevent,
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index c396ac3e3a..42791fa0b8 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -64,13 +64,13 @@ static void ccw_timeout_log(struct ccw_device *cdev)
printk(KERN_WARNING "cio: orb indicates transport mode\n");
printk(KERN_WARNING "cio: last tcw:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
- phys_to_virt(orb->tm.tcw),
+ dma32_to_virt(orb->tm.tcw),
sizeof(struct tcw), 0);
} else {
printk(KERN_WARNING "cio: orb indicates command mode\n");
- if ((void *)(addr_t)orb->cmd.cpa ==
+ if (dma32_to_virt(orb->cmd.cpa) ==
&private->dma_area->sense_ccw ||
- (void *)(addr_t)orb->cmd.cpa ==
+ dma32_to_virt(orb->cmd.cpa) ==
cdev->private->dma_area->iccws)
printk(KERN_WARNING "cio: last channel program "
"(intern):\n");
@@ -78,7 +78,7 @@ static void ccw_timeout_log(struct ccw_device *cdev)
printk(KERN_WARNING "cio: last channel program:\n");
print_hex_dump(KERN_WARNING, "cio: ", DUMP_PREFIX_NONE, 16, 1,
- phys_to_virt(orb->cmd.cpa),
+ dma32_to_virt(orb->cmd.cpa),
sizeof(struct ccw1), 0);
}
printk(KERN_WARNING "cio: ccw device state: %d\n",
@@ -504,6 +504,11 @@ callback:
ccw_device_done(cdev, DEV_STATE_ONLINE);
/* Deliver fake irb to device driver, if needed. */
if (cdev->private->flags.fake_irb) {
+ CIO_MSG_EVENT(2, "fakeirb: deliver device 0.%x.%04x intparm %lx type=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno,
+ cdev->private->intparm,
+ cdev->private->flags.fake_irb);
create_fake_irb(&cdev->private->dma_area->irb,
cdev->private->flags.fake_irb);
cdev->private->flags.fake_irb = 0;
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c
index ce99ee2457..a512eac834 100644
--- a/drivers/s390/cio/device_id.c
+++ b/drivers/s390/cio/device_id.c
@@ -210,7 +210,7 @@ void ccw_device_sense_id_start(struct ccw_device *cdev)
snsid_init(cdev);
/* Channel program setup. */
cp->cmd_code = CCW_CMD_SENSE_ID;
- cp->cda = (u32)virt_to_phys(&cdev->private->dma_area->senseid);
+ cp->cda = virt_to_dma32(&cdev->private->dma_area->senseid);
cp->count = sizeof(struct senseid);
cp->flags = CCW_FLAG_SLI;
/* Request setup. */
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index a5dba38297..acd6790dba 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -208,6 +208,10 @@ int ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa,
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = FAKE_CMD_IRB;
cdev->private->intparm = intparm;
+ CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, intparm,
+ cdev->private->flags.fake_irb);
return 0;
} else
/* There's already a fake I/O around. */
@@ -551,6 +555,10 @@ int ccw_device_tm_start_timeout_key(struct ccw_device *cdev, struct tcw *tcw,
if (!cdev->private->flags.fake_irb) {
cdev->private->flags.fake_irb = FAKE_TM_IRB;
cdev->private->intparm = intparm;
+ CIO_MSG_EVENT(2, "fakeirb: queue device 0.%x.%04x intparm %lx type=%d\n",
+ cdev->private->dev_id.ssid,
+ cdev->private->dev_id.devno, intparm,
+ cdev->private->flags.fake_irb);
return 0;
} else
/* There's already a fake I/O around. */
@@ -823,13 +831,14 @@ EXPORT_SYMBOL_GPL(ccw_device_get_chid);
* the subchannels dma pool. Maximal size of allocation supported
* is PAGE_SIZE.
*/
-void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size)
+void *ccw_device_dma_zalloc(struct ccw_device *cdev, size_t size,
+ dma32_t *dma_handle)
{
void *addr;
if (!get_device(&cdev->dev))
return NULL;
- addr = cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size);
+ addr = __cio_gp_dma_zalloc(cdev->private->dma_pool, &cdev->dev, size, dma_handle);
if (IS_ERR_OR_NULL(addr))
put_device(&cdev->dev);
return addr;
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index ad90045873..b3afe283cc 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -141,7 +141,7 @@ static void spid_build_cp(struct ccw_device *cdev, u8 fn)
pgid->inf.fc = fn;
cp->cmd_code = CCW_CMD_SET_PGID;
- cp->cda = (u32)virt_to_phys(pgid);
+ cp->cda = virt_to_dma32(pgid);
cp->count = sizeof(*pgid);
cp->flags = CCW_FLAG_SLI;
req->cp = cp;
@@ -442,7 +442,7 @@ static void snid_build_cp(struct ccw_device *cdev)
/* Channel program setup. */
cp->cmd_code = CCW_CMD_SENSE_PGID;
- cp->cda = (u32)virt_to_phys(&cdev->private->dma_area->pgid[i]);
+ cp->cda = virt_to_dma32(&cdev->private->dma_area->pgid[i]);
cp->count = sizeof(struct pgid);
cp->flags = CCW_FLAG_SLI;
req->cp = cp;
@@ -632,11 +632,11 @@ static void stlck_build_cp(struct ccw_device *cdev, void *buf1, void *buf2)
struct ccw1 *cp = cdev->private->dma_area->iccws;
cp[0].cmd_code = CCW_CMD_STLCK;
- cp[0].cda = (u32)virt_to_phys(buf1);
+ cp[0].cda = virt_to_dma32(buf1);
cp[0].count = 32;
cp[0].flags = CCW_FLAG_CC;
cp[1].cmd_code = CCW_CMD_RELEASE;
- cp[1].cda = (u32)virt_to_phys(buf2);
+ cp[1].cda = virt_to_dma32(buf2);
cp[1].count = 32;
cp[1].flags = 0;
req->cp = cp;
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c
index 6c2e35065f..0ff8482a7b 100644
--- a/drivers/s390/cio/device_status.c
+++ b/drivers/s390/cio/device_status.c
@@ -332,7 +332,7 @@ ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb)
*/
sense_ccw = &to_io_private(sch)->dma_area->sense_ccw;
sense_ccw->cmd_code = CCW_CMD_BASIC_SENSE;
- sense_ccw->cda = virt_to_phys(cdev->private->dma_area->irb.ecw);
+ sense_ccw->cda = virt_to_dma32(cdev->private->dma_area->irb.ecw);
sense_ccw->count = SENSE_MAX_COUNT;
sense_ccw->flags = CCW_FLAG_SLI;
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index 1caedf931a..165de15523 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -63,7 +63,7 @@ static int eadm_subchannel_start(struct subchannel *sch, struct aob *aob)
int cc;
orb_init(orb);
- orb->eadm.aob = (u32)virt_to_phys(aob);
+ orb->eadm.aob = virt_to_dma32(aob);
orb->eadm.intparm = (u32)virt_to_phys(sch);
orb->eadm.key = PAGE_DEFAULT_KEY >> 4;
@@ -147,7 +147,7 @@ static void eadm_subchannel_irq(struct subchannel *sch)
css_sched_sch_todo(sch, SCH_TODO_EVAL);
return;
}
- scm_irq_handler(phys_to_virt(scsw->aob), error);
+ scm_irq_handler(dma32_to_virt(scsw->aob), error);
private->state = EADM_IDLE;
if (private->completion)
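
[Not part of the patch] The eadm changes above show both directions of the new conversion helpers: virt_to_dma32() when an address is stored into a channel-I/O control block, dma32_to_virt() when it is read back from hardware status. A hedged round-trip sketch with a made-up buffer argument:

	#include <linux/bug.h>
	#include <asm/dma-types.h>

	static void example_round_trip(void *aob)
	{
		dma32_t handle = virt_to_dma32(aob);	/* as stored into orb->eadm.aob */
		void *back = dma32_to_virt(handle);	/* as recovered from scsw->aob in the irq handler */

		WARN_ON(back != aob);
	}
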
diff --git a/drivers/s390/cio/fcx.c b/drivers/s390/cio/fcx.c
index 84f24a2f46..ba35b64949 100644
--- a/drivers/s390/cio/fcx.c
+++ b/drivers/s390/cio/fcx.c
@@ -25,7 +25,7 @@
*/
struct tcw *tcw_get_intrg(struct tcw *tcw)
{
- return phys_to_virt(tcw->intrg);
+ return dma32_to_virt(tcw->intrg);
}
EXPORT_SYMBOL(tcw_get_intrg);
@@ -40,9 +40,9 @@ EXPORT_SYMBOL(tcw_get_intrg);
void *tcw_get_data(struct tcw *tcw)
{
if (tcw->r)
- return phys_to_virt(tcw->input);
+ return dma64_to_virt(tcw->input);
if (tcw->w)
- return phys_to_virt(tcw->output);
+ return dma64_to_virt(tcw->output);
return NULL;
}
EXPORT_SYMBOL(tcw_get_data);
@@ -55,7 +55,7 @@ EXPORT_SYMBOL(tcw_get_data);
*/
struct tccb *tcw_get_tccb(struct tcw *tcw)
{
- return phys_to_virt(tcw->tccb);
+ return dma64_to_virt(tcw->tccb);
}
EXPORT_SYMBOL(tcw_get_tccb);
@@ -67,7 +67,7 @@ EXPORT_SYMBOL(tcw_get_tccb);
*/
struct tsb *tcw_get_tsb(struct tcw *tcw)
{
- return phys_to_virt(tcw->tsb);
+ return dma64_to_virt(tcw->tsb);
}
EXPORT_SYMBOL(tcw_get_tsb);
@@ -190,7 +190,7 @@ EXPORT_SYMBOL(tcw_finalize);
*/
void tcw_set_intrg(struct tcw *tcw, struct tcw *intrg_tcw)
{
- tcw->intrg = (u32)virt_to_phys(intrg_tcw);
+ tcw->intrg = virt_to_dma32(intrg_tcw);
}
EXPORT_SYMBOL(tcw_set_intrg);
@@ -208,11 +208,11 @@ EXPORT_SYMBOL(tcw_set_intrg);
void tcw_set_data(struct tcw *tcw, void *data, int use_tidal)
{
if (tcw->r) {
- tcw->input = virt_to_phys(data);
+ tcw->input = virt_to_dma64(data);
if (use_tidal)
tcw->flags |= TCW_FLAGS_INPUT_TIDA;
} else if (tcw->w) {
- tcw->output = virt_to_phys(data);
+ tcw->output = virt_to_dma64(data);
if (use_tidal)
tcw->flags |= TCW_FLAGS_OUTPUT_TIDA;
}
@@ -228,7 +228,7 @@ EXPORT_SYMBOL(tcw_set_data);
*/
void tcw_set_tccb(struct tcw *tcw, struct tccb *tccb)
{
- tcw->tccb = virt_to_phys(tccb);
+ tcw->tccb = virt_to_dma64(tccb);
}
EXPORT_SYMBOL(tcw_set_tccb);
@@ -241,7 +241,7 @@ EXPORT_SYMBOL(tcw_set_tccb);
*/
void tcw_set_tsb(struct tcw *tcw, struct tsb *tsb)
{
- tcw->tsb = virt_to_phys(tsb);
+ tcw->tsb = virt_to_dma64(tsb);
}
EXPORT_SYMBOL(tcw_set_tsb);
@@ -346,7 +346,7 @@ struct tidaw *tcw_add_tidaw(struct tcw *tcw, int num_tidaws, u8 flags,
memset(tidaw, 0, sizeof(struct tidaw));
tidaw->flags = flags;
tidaw->count = count;
- tidaw->addr = virt_to_phys(addr);
+ tidaw->addr = virt_to_dma64(addr);
return tidaw;
}
EXPORT_SYMBOL(tcw_add_tidaw);
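
[Not part of the patch] A short sketch of how a transport-command TCW might be assembled with the accessors converted above. tcw_init() and tcw_finalize() are existing fcx helpers not shown in this hunk, and error handling is omitted; only the tcw_set_*() calls and their new dma64 storage come from the patch.

	#include <asm/fcx.h>

	static void example_build_tcw(struct tcw *tcw, struct tccb *tccb,
				      struct tsb *tsb, void *data)
	{
		tcw_init(tcw, 1, 0);		/* read-direction TCW */
		tcw_set_data(tcw, data, 0);	/* stored internally as dma64_t now */
		tcw_set_tccb(tcw, tccb);
		tcw_set_tsb(tcw, tsb);
		tcw_finalize(tcw, 0);		/* no TIDAWs in this sketch */
	}
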
diff --git a/drivers/s390/cio/orb.h b/drivers/s390/cio/orb.h
index a2d3778b2c..14d2a1822b 100644
--- a/drivers/s390/cio/orb.h
+++ b/drivers/s390/cio/orb.h
@@ -12,6 +12,9 @@
#ifndef S390_ORB_H
#define S390_ORB_H
+#include <linux/types.h>
+#include <asm/dma-types.h>
+
/*
* Command-mode operation request block
*/
@@ -34,7 +37,7 @@ struct cmd_orb {
u32 ils:1; /* incorrect length */
u32 zero:6; /* reserved zeros */
u32 orbx:1; /* ORB extension control */
- u32 cpa; /* channel program address */
+ dma32_t cpa; /* channel program address */
} __packed __aligned(4);
/*
@@ -49,7 +52,7 @@ struct tm_orb {
u32 lpm:8;
u32:7;
u32 x:1;
- u32 tcw;
+ dma32_t tcw;
u32 prio:8;
u32:8;
u32 rsvpgm:8;
@@ -71,7 +74,7 @@ struct eadm_orb {
u32 compat2:1;
u32:21;
u32 x:1;
- u32 aob;
+ dma32_t aob;
u32 css_prio:8;
u32:8;
u32 scm_prio:8;
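
[Not part of the patch] With cpa typed as dma32_t, filling a command-mode ORB now uses virt_to_dma32() instead of a (u32)virt_to_phys() cast. A rough sketch; the fields other than cpa come from the pre-existing struct cmd_orb layout and the wrapper itself is hypothetical:

	#include <linux/string.h>
	#include <asm/cio.h>
	#include <asm/dma-types.h>
	#include "orb.h"

	static void example_fill_cmd_orb(union orb *orb, struct ccw1 *cpa, u8 lpm)
	{
		memset(orb, 0, sizeof(*orb));
		orb->cmd.fmt = 1;			/* format-1 CCWs */
		orb->cmd.lpm = lpm;			/* logical path mask */
		orb->cmd.cpa = virt_to_dma32(cpa);	/* was (u32)virt_to_phys(cpa) */
	}
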
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index ebcb535809..a1cb39f4b7 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -82,7 +82,7 @@ static inline int do_siga_input(unsigned long schid, unsigned long mask,
*/
static inline int do_siga_output(unsigned long schid, unsigned long mask,
unsigned int *bb, unsigned long fc,
- unsigned long aob)
+ dma64_t aob)
{
int cc;
@@ -321,7 +321,7 @@ static inline int qdio_siga_sync_q(struct qdio_q *q)
}
static int qdio_siga_output(struct qdio_q *q, unsigned int count,
- unsigned int *busy_bit, unsigned long aob)
+ unsigned int *busy_bit, dma64_t aob)
{
unsigned long schid = *((u32 *) &q->irq_ptr->schid);
unsigned int fc = QDIO_SIGA_WRITE;
@@ -628,7 +628,7 @@ int qdio_inspect_output_queue(struct ccw_device *cdev, unsigned int nr,
EXPORT_SYMBOL_GPL(qdio_inspect_output_queue);
static int qdio_kick_outbound_q(struct qdio_q *q, unsigned int count,
- unsigned long aob)
+ dma64_t aob)
{
int retries = 0, cc;
unsigned int busy_bit;
@@ -1088,7 +1088,7 @@ int qdio_establish(struct ccw_device *cdev,
irq_ptr->ccw->cmd_code = ciw->cmd;
irq_ptr->ccw->flags = CCW_FLAG_SLI;
irq_ptr->ccw->count = ciw->count;
- irq_ptr->ccw->cda = (u32) virt_to_phys(irq_ptr->qdr);
+ irq_ptr->ccw->cda = virt_to_dma32(irq_ptr->qdr);
spin_lock_irq(get_ccwdev_lock(cdev));
ccw_device_set_options_mask(cdev, 0);
@@ -1281,9 +1281,9 @@ static int handle_outbound(struct qdio_q *q, unsigned int bufnr, unsigned int co
qperf_inc(q, outbound_queue_full);
if (queue_type(q) == QDIO_IQDIO_QFMT) {
- unsigned long phys_aob = aob ? virt_to_phys(aob) : 0;
+ dma64_t phys_aob = aob ? virt_to_dma64(aob) : 0;
- WARN_ON_ONCE(!IS_ALIGNED(phys_aob, 256));
+ WARN_ON_ONCE(!IS_ALIGNED(dma64_to_u64(phys_aob), 256));
rc = qdio_kick_outbound_q(q, count, phys_aob);
} else if (qdio_need_siga_sync(q->irq_ptr)) {
rc = qdio_sync_output_queue(q);
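
[Not part of the patch] The AOB address handed to SIGA is now a typed dma64_t, so any plain numeric check has to go through dma64_to_u64() first. A condensed sketch of the conversion and the 256-byte alignment check used above:

	#include <linux/kernel.h>
	#include <asm/dma-types.h>
	#include <asm/qdio.h>

	static dma64_t example_aob_handle(struct qaob *aob)
	{
		dma64_t phys_aob = aob ? virt_to_dma64(aob) : 0;

		/* SIGA expects a 256-byte aligned AOB; compare the raw u64 value */
		WARN_ON_ONCE(!IS_ALIGNED(dma64_to_u64(phys_aob), 256));
		return phys_aob;
	}
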
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 714878e2ac..99c0fd2302 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -179,7 +179,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
/* fill in sl */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
- q->sl->element[j].sbal = virt_to_phys(q->sbal[j]);
+ q->sl->element[j].sbal = virt_to_dma64(q->sbal[j]);
}
static void setup_queues(struct qdio_irq *irq_ptr,
@@ -291,9 +291,9 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr)
static void qdio_fill_qdr_desc(struct qdesfmt0 *desc, struct qdio_q *queue)
{
- desc->sliba = virt_to_phys(queue->slib);
- desc->sla = virt_to_phys(queue->sl);
- desc->slsba = virt_to_phys(&queue->slsb);
+ desc->sliba = virt_to_dma64(queue->slib);
+ desc->sla = virt_to_dma64(queue->sl);
+ desc->slsba = virt_to_dma64(&queue->slsb);
desc->akey = PAGE_DEFAULT_KEY >> 4;
desc->bkey = PAGE_DEFAULT_KEY >> 4;
@@ -315,7 +315,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
irq_ptr->qdr->oqdcnt = qdio_init->no_output_qs;
irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
- irq_ptr->qdr->qiba = virt_to_phys(&irq_ptr->qib);
+ irq_ptr->qdr->qiba = virt_to_dma64(&irq_ptr->qib);
irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
for (i = 0; i < qdio_init->no_input_qs; i++)
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 9b9335dd06..ccd4ed93bd 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -137,15 +137,15 @@ static struct airq_struct tiqdio_airq = {
static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
{
struct chsc_scssc_area *scssc = (void *)irq_ptr->chsc_page;
- u64 summary_indicator_addr, subchannel_indicator_addr;
+ dma64_t summary_indicator_addr, subchannel_indicator_addr;
int rc;
if (reset) {
summary_indicator_addr = 0;
subchannel_indicator_addr = 0;
} else {
- summary_indicator_addr = virt_to_phys(tiqdio_airq.lsi_ptr);
- subchannel_indicator_addr = virt_to_phys(irq_ptr->dsci);
+ summary_indicator_addr = virt_to_dma64(tiqdio_airq.lsi_ptr);
+ subchannel_indicator_addr = virt_to_dma64(irq_ptr->dsci);
}
rc = chsc_sadc(irq_ptr->schid, scssc, summary_indicator_addr,
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c
index 6b21ba68c1..c7894d6130 100644
--- a/drivers/s390/cio/scm.c
+++ b/drivers/s390/cio/scm.c
@@ -42,7 +42,7 @@ static int scmdev_uevent(const struct device *dev, struct kobj_uevent_env *env)
return add_uevent_var(env, "MODALIAS=scm:scmdev");
}
-static struct bus_type scm_bus_type = {
+static const struct bus_type scm_bus_type = {
.name = "scm",
.probe = scmdev_probe,
.remove = scmdev_remove,
@@ -228,7 +228,7 @@ int scm_update_information(void)
size_t num;
int ret;
- scm_info = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+ scm_info = (void *)__get_free_page(GFP_KERNEL);
if (!scm_info)
return -ENOMEM;
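
[Not part of the patch] Several bus_type instances in this series (scm_bus_type above, ap_bus_type and matrix_bus further down) become const, which is possible once the driver core registration paths take a const bus pointer. The resulting pattern, sketched with hypothetical names and assuming bus_register() accepts a const struct bus_type *:

	#include <linux/device.h>

	static int example_probe(struct device *dev)
	{
		return 0;	/* stub, just to keep the sketch self-contained */
	}

	static const struct bus_type example_bus_type = {
		.name  = "example",
		.probe = example_probe,
	};

	static int __init example_bus_init(void)
	{
		return bus_register(&example_bus_type);
	}
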
diff --git a/drivers/s390/cio/vfio_ccw_cp.c b/drivers/s390/cio/vfio_ccw_cp.c
index aafd66305e..6e5c508b1e 100644
--- a/drivers/s390/cio/vfio_ccw_cp.c
+++ b/drivers/s390/cio/vfio_ccw_cp.c
@@ -190,7 +190,7 @@ static bool page_array_iova_pinned(struct page_array *pa, u64 iova, u64 length)
}
/* Create the list of IDAL words for a page_array. */
static inline void page_array_idal_create_words(struct page_array *pa,
- unsigned long *idaws)
+ dma64_t *idaws)
{
int i;
@@ -203,10 +203,10 @@ static inline void page_array_idal_create_words(struct page_array *pa,
*/
for (i = 0; i < pa->pa_nr; i++) {
- idaws[i] = page_to_phys(pa->pa_page[i]);
+ idaws[i] = virt_to_dma64(page_to_virt(pa->pa_page[i]));
/* Incorporate any offset from each starting address */
- idaws[i] += pa->pa_iova[i] & (PAGE_SIZE - 1);
+ idaws[i] = dma64_add(idaws[i], pa->pa_iova[i] & ~PAGE_MASK);
}
}
@@ -227,7 +227,7 @@ static void convert_ccw0_to_ccw1(struct ccw1 *source, unsigned long len)
pccw1->flags = ccw0.flags;
pccw1->count = ccw0.count;
}
- pccw1->cda = ccw0.cda;
+ pccw1->cda = u32_to_dma32(ccw0.cda);
pccw1++;
}
}
@@ -299,11 +299,12 @@ static inline int ccw_does_data_transfer(struct ccw1 *ccw)
*
* Returns 1 if yes, 0 if no.
*/
-static inline int is_cpa_within_range(u32 cpa, u32 head, int len)
+static inline int is_cpa_within_range(dma32_t cpa, u32 head, int len)
{
u32 tail = head + (len - 1) * sizeof(struct ccw1);
+ u32 gcpa = dma32_to_u32(cpa);
- return (head <= cpa && cpa <= tail);
+ return head <= gcpa && gcpa <= tail;
}
static inline int is_tic_within_range(struct ccw1 *ccw, u32 head, int len)
@@ -356,7 +357,7 @@ static void ccwchain_cda_free(struct ccwchain *chain, int idx)
if (ccw_is_tic(ccw))
return;
- kfree(phys_to_virt(ccw->cda));
+ kfree(dma32_to_virt(ccw->cda));
}
/**
@@ -417,15 +418,17 @@ static int tic_target_chain_exists(struct ccw1 *tic, struct channel_program *cp)
static int ccwchain_loop_tic(struct ccwchain *chain,
struct channel_program *cp);
-static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
+static int ccwchain_handle_ccw(dma32_t cda, struct channel_program *cp)
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
struct ccwchain *chain;
int len, ret;
+ u32 gcda;
+ gcda = dma32_to_u32(cda);
/* Copy 2K (the most we support today) of possible CCWs */
- ret = vfio_dma_rw(vdev, cda, cp->guest_cp, CCWCHAIN_LEN_MAX * sizeof(struct ccw1), false);
+ ret = vfio_dma_rw(vdev, gcda, cp->guest_cp, CCWCHAIN_LEN_MAX * sizeof(struct ccw1), false);
if (ret)
return ret;
@@ -434,7 +437,7 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
convert_ccw0_to_ccw1(cp->guest_cp, CCWCHAIN_LEN_MAX);
/* Count the CCWs in the current chain */
- len = ccwchain_calc_length(cda, cp);
+ len = ccwchain_calc_length(gcda, cp);
if (len < 0)
return len;
@@ -444,7 +447,7 @@ static int ccwchain_handle_ccw(u32 cda, struct channel_program *cp)
return -ENOMEM;
chain->ch_len = len;
- chain->ch_iova = cda;
+ chain->ch_iova = gcda;
/* Copy the actual CCWs into the new chain */
memcpy(chain->ch_ccw, cp->guest_cp, len * sizeof(struct ccw1));
@@ -487,13 +490,13 @@ static int ccwchain_fetch_tic(struct ccw1 *ccw,
struct channel_program *cp)
{
struct ccwchain *iter;
- u32 ccw_head;
+ u32 cda, ccw_head;
list_for_each_entry(iter, &cp->ccwchain_list, next) {
ccw_head = iter->ch_iova;
if (is_cpa_within_range(ccw->cda, ccw_head, iter->ch_len)) {
- ccw->cda = (__u32) (addr_t) (((char *)iter->ch_ccw) +
- (ccw->cda - ccw_head));
+ cda = (u64)iter->ch_ccw + dma32_to_u32(ccw->cda) - ccw_head;
+ ccw->cda = u32_to_dma32(cda);
return 0;
}
}
@@ -501,14 +504,12 @@ static int ccwchain_fetch_tic(struct ccw1 *ccw,
return -EFAULT;
}
-static unsigned long *get_guest_idal(struct ccw1 *ccw,
- struct channel_program *cp,
- int idaw_nr)
+static dma64_t *get_guest_idal(struct ccw1 *ccw, struct channel_program *cp, int idaw_nr)
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
- unsigned long *idaws;
- unsigned int *idaws_f1;
+ dma64_t *idaws;
+ dma32_t *idaws_f1;
int idal_len = idaw_nr * sizeof(*idaws);
int idaw_size = idal_is_2k(cp) ? PAGE_SIZE / 2 : PAGE_SIZE;
int idaw_mask = ~(idaw_size - 1);
@@ -520,7 +521,7 @@ static unsigned long *get_guest_idal(struct ccw1 *ccw,
if (ccw_is_idal(ccw)) {
/* Copy IDAL from guest */
- ret = vfio_dma_rw(vdev, ccw->cda, idaws, idal_len, false);
+ ret = vfio_dma_rw(vdev, dma32_to_u32(ccw->cda), idaws, idal_len, false);
if (ret) {
kfree(idaws);
return ERR_PTR(ret);
@@ -528,14 +529,18 @@ static unsigned long *get_guest_idal(struct ccw1 *ccw,
} else {
/* Fabricate an IDAL based off CCW data address */
if (cp->orb.cmd.c64) {
- idaws[0] = ccw->cda;
- for (i = 1; i < idaw_nr; i++)
- idaws[i] = (idaws[i - 1] + idaw_size) & idaw_mask;
+ idaws[0] = u64_to_dma64(dma32_to_u32(ccw->cda));
+ for (i = 1; i < idaw_nr; i++) {
+ idaws[i] = dma64_add(idaws[i - 1], idaw_size);
+ idaws[i] = dma64_and(idaws[i], idaw_mask);
+ }
} else {
- idaws_f1 = (unsigned int *)idaws;
+ idaws_f1 = (dma32_t *)idaws;
idaws_f1[0] = ccw->cda;
- for (i = 1; i < idaw_nr; i++)
- idaws_f1[i] = (idaws_f1[i - 1] + idaw_size) & idaw_mask;
+ for (i = 1; i < idaw_nr; i++) {
+ idaws_f1[i] = dma32_add(idaws_f1[i - 1], idaw_size);
+ idaws_f1[i] = dma32_and(idaws_f1[i], idaw_mask);
+ }
}
}
@@ -572,7 +577,7 @@ static int ccw_count_idaws(struct ccw1 *ccw,
if (ccw_is_idal(ccw)) {
/* Read first IDAW to check its starting address. */
/* All subsequent IDAWs will be 2K- or 4K-aligned. */
- ret = vfio_dma_rw(vdev, ccw->cda, &iova, size, false);
+ ret = vfio_dma_rw(vdev, dma32_to_u32(ccw->cda), &iova, size, false);
if (ret)
return ret;
@@ -583,7 +588,7 @@ static int ccw_count_idaws(struct ccw1 *ccw,
if (!cp->orb.cmd.c64)
iova = iova >> 32;
} else {
- iova = ccw->cda;
+ iova = dma32_to_u32(ccw->cda);
}
/* Format-1 IDAWs operate on 2K each */
@@ -604,8 +609,8 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
{
struct vfio_device *vdev =
&container_of(cp, struct vfio_ccw_private, cp)->vdev;
- unsigned long *idaws;
- unsigned int *idaws_f1;
+ dma64_t *idaws;
+ dma32_t *idaws_f1;
int ret;
int idaw_nr;
int i;
@@ -636,12 +641,12 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
* Copy guest IDAWs into page_array, in case the memory they
* occupy is not contiguous.
*/
- idaws_f1 = (unsigned int *)idaws;
+ idaws_f1 = (dma32_t *)idaws;
for (i = 0; i < idaw_nr; i++) {
if (cp->orb.cmd.c64)
- pa->pa_iova[i] = idaws[i];
+ pa->pa_iova[i] = dma64_to_u64(idaws[i]);
else
- pa->pa_iova[i] = idaws_f1[i];
+ pa->pa_iova[i] = dma32_to_u32(idaws_f1[i]);
}
if (ccw_does_data_transfer(ccw)) {
@@ -652,7 +657,7 @@ static int ccwchain_fetch_ccw(struct ccw1 *ccw,
pa->pa_nr = 0;
}
- ccw->cda = (__u32) virt_to_phys(idaws);
+ ccw->cda = virt_to_dma32(idaws);
ccw->flags |= CCW_FLAG_IDA;
/* Populate the IDAL with pinned/translated addresses from page */
@@ -874,7 +879,7 @@ union orb *cp_get_orb(struct channel_program *cp, struct subchannel *sch)
chain = list_first_entry(&cp->ccwchain_list, struct ccwchain, next);
cpa = chain->ch_ccw;
- orb->cmd.cpa = (__u32)virt_to_phys(cpa);
+ orb->cmd.cpa = virt_to_dma32(cpa);
return orb;
}
@@ -896,7 +901,7 @@ union orb *cp_get_orb(struct channel_program *cp, struct subchannel *sch)
void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
{
struct ccwchain *chain;
- u32 cpa = scsw->cmd.cpa;
+ dma32_t cpa = scsw->cmd.cpa;
u32 ccw_head;
if (!cp->initialized)
@@ -919,9 +924,10 @@ void cp_update_scsw(struct channel_program *cp, union scsw *scsw)
* (cpa - ccw_head) is the offset value of the host
* physical ccw to its chain head.
* Adding this value to the guest physical ccw chain
- * head gets us the guest cpa.
+ * head gets us the guest cpa:
+ * cpa = chain->ch_iova + (cpa - ccw_head)
*/
- cpa = chain->ch_iova + (cpa - ccw_head);
+ cpa = dma32_add(cpa, chain->ch_iova - ccw_head);
break;
}
}
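
[Not part of the patch] The fabricated-IDAL loops above are the main users of the new dma64/dma32 arithmetic helpers. A condensed sketch of the format-2 case under the same assumptions (idaw_size is 2K or 4K depending on the ORB, and the first IDAW keeps the data address's offset into its block):

	#include <asm/dma-types.h>

	static void example_fabricate_idal(dma64_t *idaws, int idaw_nr,
					   u32 cda, int idaw_size)
	{
		int idaw_mask = ~(idaw_size - 1);
		int i;

		idaws[0] = u64_to_dma64(cda);	/* first IDAW: CCW data address as-is */
		for (i = 1; i < idaw_nr; i++) {
			/* every further IDAW starts at the next idaw_size boundary */
			idaws[i] = dma64_add(idaws[i - 1], idaw_size);
			idaws[i] = dma64_and(idaws[i], idaw_mask);
		}
	}
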
diff --git a/drivers/s390/cio/vfio_ccw_fsm.c b/drivers/s390/cio/vfio_ccw_fsm.c
index 09877b4618..4d7988ea47 100644
--- a/drivers/s390/cio/vfio_ccw_fsm.c
+++ b/drivers/s390/cio/vfio_ccw_fsm.c
@@ -378,7 +378,7 @@ static void fsm_open(struct vfio_ccw_private *private,
spin_lock_irq(&sch->lock);
sch->isc = VFIO_CCW_ISC;
- ret = cio_enable_subchannel(sch, (u32)(unsigned long)sch);
+ ret = cio_enable_subchannel(sch, (u32)virt_to_phys(sch));
if (ret)
goto err_unlock;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index f46dd6abac..ca7e626a97 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -38,6 +38,7 @@
#include <linux/debugfs.h>
#include <linux/ctype.h>
#include <linux/module.h>
+#include <asm/uv.h>
#include "ap_bus.h"
#include "ap_debug.h"
@@ -83,14 +84,11 @@ EXPORT_SYMBOL(ap_perms);
DEFINE_MUTEX(ap_perms_mutex);
EXPORT_SYMBOL(ap_perms_mutex);
-/* # of bus scans since init */
-static atomic64_t ap_scan_bus_count;
-
/* # of bindings complete since init */
static atomic64_t ap_bindings_complete_count = ATOMIC64_INIT(0);
-/* completion for initial APQN bindings complete */
-static DECLARE_COMPLETION(ap_init_apqn_bindings_complete);
+/* completion for APQN bindings complete */
+static DECLARE_COMPLETION(ap_apqn_bindings_complete);
static struct ap_config_info *ap_qci_info;
static struct ap_config_info *ap_qci_info_old;
@@ -101,12 +99,16 @@ static struct ap_config_info *ap_qci_info_old;
debug_info_t *ap_dbf_info;
/*
- * Workqueue timer for bus rescan.
+ * AP bus rescan related things.
*/
-static struct timer_list ap_config_timer;
-static int ap_config_time = AP_CONFIG_TIME;
-static void ap_scan_bus(struct work_struct *);
-static DECLARE_WORK(ap_scan_work, ap_scan_bus);
+static bool ap_scan_bus(void);
+static bool ap_scan_bus_result; /* result of last ap_scan_bus() */
+static DEFINE_MUTEX(ap_scan_bus_mutex); /* mutex ap_scan_bus() invocations */
+static atomic64_t ap_scan_bus_count; /* counter ap_scan_bus() invocations */
+static int ap_scan_bus_time = AP_CONFIG_TIME;
+static struct timer_list ap_scan_bus_timer;
+static void ap_scan_bus_wq_callback(struct work_struct *);
+static DECLARE_WORK(ap_scan_bus_work, ap_scan_bus_wq_callback);
/*
* Tasklet & timer for AP request polling and interrupts
@@ -135,7 +137,7 @@ static int ap_max_domain_id = 15;
/* Maximum adapter id, if not given via qci */
static int ap_max_adapter_id = 63;
-static struct bus_type ap_bus_type;
+static const struct bus_type ap_bus_type;
/* Adapter interrupt definitions */
static void ap_interrupt_handler(struct airq_struct *airq,
@@ -753,7 +755,7 @@ static void ap_calc_bound_apqns(unsigned int *apqns, unsigned int *bound)
}
/*
- * After initial ap bus scan do check if all existing APQNs are
+ * After an ap bus scan check if all existing APQNs are
* bound to device drivers.
*/
static void ap_check_bindings_complete(void)
@@ -763,11 +765,11 @@ static void ap_check_bindings_complete(void)
if (atomic64_read(&ap_scan_bus_count) >= 1) {
ap_calc_bound_apqns(&apqns, &bound);
if (bound == apqns) {
- if (!completion_done(&ap_init_apqn_bindings_complete)) {
- complete_all(&ap_init_apqn_bindings_complete);
- AP_DBF_INFO("%s complete\n", __func__);
+ if (!completion_done(&ap_apqn_bindings_complete)) {
+ complete_all(&ap_apqn_bindings_complete);
+ ap_send_bindings_complete_uevent();
+ pr_debug("%s all apqn bindings complete\n", __func__);
}
- ap_send_bindings_complete_uevent();
}
}
}
@@ -782,27 +784,29 @@ static void ap_check_bindings_complete(void)
* -ETIME is returned. On failures negative return values are
* returned to the caller.
*/
-int ap_wait_init_apqn_bindings_complete(unsigned long timeout)
+int ap_wait_apqn_bindings_complete(unsigned long timeout)
{
+ int rc = 0;
long l;
- if (completion_done(&ap_init_apqn_bindings_complete))
+ if (completion_done(&ap_apqn_bindings_complete))
return 0;
if (timeout)
l = wait_for_completion_interruptible_timeout(
- &ap_init_apqn_bindings_complete, timeout);
+ &ap_apqn_bindings_complete, timeout);
else
l = wait_for_completion_interruptible(
- &ap_init_apqn_bindings_complete);
+ &ap_apqn_bindings_complete);
if (l < 0)
- return l == -ERESTARTSYS ? -EINTR : l;
+ rc = l == -ERESTARTSYS ? -EINTR : l;
else if (l == 0 && timeout)
- return -ETIME;
+ rc = -ETIME;
- return 0;
+ pr_debug("%s rc=%d\n", __func__, rc);
+ return rc;
}
-EXPORT_SYMBOL(ap_wait_init_apqn_bindings_complete);
+EXPORT_SYMBOL(ap_wait_apqn_bindings_complete);
static int __ap_queue_devices_with_id_unregister(struct device *dev, void *data)
{
@@ -826,8 +830,8 @@ static int __ap_revise_reserved(struct device *dev, void *dummy)
drvres = to_ap_drv(dev->driver)->flags
& AP_DRIVER_FLAG_DEFAULT;
if (!!devres != !!drvres) {
- AP_DBF_DBG("%s reprobing queue=%02x.%04x\n",
- __func__, card, queue);
+ pr_debug("%s reprobing queue=%02x.%04x\n",
+ __func__, card, queue);
rc = device_reprobe(dev);
if (rc)
AP_DBF_WARN("%s reprobing queue=%02x.%04x failed\n",
@@ -925,6 +929,12 @@ static int ap_device_probe(struct device *dev)
goto out;
}
+ /*
+ * Rearm the bindings complete completion to trigger
+ * bindings complete when all devices are bound again
+ */
+ reinit_completion(&ap_apqn_bindings_complete);
+
/* Add queue/card to list of active queues/cards */
spin_lock_bh(&ap_queues_lock);
if (is_queue_dev(dev))
@@ -939,8 +949,6 @@ static int ap_device_probe(struct device *dev)
if (is_queue_dev(dev))
hash_del(&to_ap_queue(dev)->hnode);
spin_unlock_bh(&ap_queues_lock);
- } else {
- ap_check_bindings_complete();
}
out:
@@ -1012,16 +1020,47 @@ void ap_driver_unregister(struct ap_driver *ap_drv)
}
EXPORT_SYMBOL(ap_driver_unregister);
-void ap_bus_force_rescan(void)
+/*
+ * Enforce a synchronous AP bus rescan.
+ * Returns true if the bus scan finds a change in the AP configuration
+ * and AP devices have been added or deleted when this function returns.
+ */
+bool ap_bus_force_rescan(void)
{
+ unsigned long scan_counter = atomic64_read(&ap_scan_bus_count);
+ bool rc = false;
+
+ pr_debug(">%s scan counter=%lu\n", __func__, scan_counter);
+
/* Only trigger AP bus scans after the initial scan is done */
- if (atomic64_read(&ap_scan_bus_count) <= 0)
- return;
+ if (scan_counter <= 0)
+ goto out;
+
+ /* Try to acquire the AP scan bus mutex */
+ if (mutex_trylock(&ap_scan_bus_mutex)) {
+ /* mutex acquired, run the AP bus scan */
+ ap_scan_bus_result = ap_scan_bus();
+ rc = ap_scan_bus_result;
+ mutex_unlock(&ap_scan_bus_mutex);
+ goto out;
+ }
- /* processing a asynchronous bus rescan */
- del_timer(&ap_config_timer);
- queue_work(system_long_wq, &ap_scan_work);
- flush_work(&ap_scan_work);
+ /*
+ * Mutex acquire failed. So there is currently another task
+ * already running the AP bus scan. Then let's simply wait
+ * for the lock which means the other task has finished and
+ * stored the result in ap_scan_bus_result.
+ */
+ if (mutex_lock_interruptible(&ap_scan_bus_mutex)) {
+ /* some error occurred, ignore and go out */
+ goto out;
+ }
+ rc = ap_scan_bus_result;
+ mutex_unlock(&ap_scan_bus_mutex);
+
+out:
+ pr_debug("%s rc=%d\n", __func__, rc);
+ return rc;
}
EXPORT_SYMBOL(ap_bus_force_rescan);
@@ -1030,7 +1069,7 @@ EXPORT_SYMBOL(ap_bus_force_rescan);
*/
void ap_bus_cfg_chg(void)
{
- AP_DBF_DBG("%s config change, forcing bus rescan\n", __func__);
+ pr_debug("%s config change, forcing bus rescan\n", __func__);
ap_bus_force_rescan();
}
@@ -1090,7 +1129,7 @@ static int hex2bitmap(const char *str, unsigned long *bitmap, int bits)
*/
static int modify_bitmap(const char *str, unsigned long *bitmap, int bits)
{
- int a, i, z;
+ unsigned long a, i, z;
char *np, sign;
/* bits needs to be a multiple of 8 */
@@ -1250,7 +1289,7 @@ static BUS_ATTR_RO(ap_interrupts);
static ssize_t config_time_show(const struct bus_type *bus, char *buf)
{
- return sysfs_emit(buf, "%d\n", ap_config_time);
+ return sysfs_emit(buf, "%d\n", ap_scan_bus_time);
}
static ssize_t config_time_store(const struct bus_type *bus,
@@ -1260,8 +1299,8 @@ static ssize_t config_time_store(const struct bus_type *bus,
if (sscanf(buf, "%d\n", &time) != 1 || time < 5 || time > 120)
return -EINVAL;
- ap_config_time = time;
- mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+ ap_scan_bus_time = time;
+ mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
return count;
}
@@ -1603,7 +1642,7 @@ static struct attribute *ap_bus_attrs[] = {
};
ATTRIBUTE_GROUPS(ap_bus);
-static struct bus_type ap_bus_type = {
+static const struct bus_type ap_bus_type = {
.name = "ap",
.bus_groups = ap_bus_groups,
.match = &ap_bus_match,
@@ -1888,8 +1927,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->last_err_rc = AP_RESPONSE_CHECKSTOPPED;
}
spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev checkstop on\n",
- __func__, ac->id, dom);
+ pr_debug("%s(%d,%d) queue dev checkstop on\n",
+ __func__, ac->id, dom);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
goto put_dev_and_continue;
@@ -1899,8 +1938,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
_ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev checkstop off\n",
- __func__, ac->id, dom);
+ pr_debug("%s(%d,%d) queue dev checkstop off\n",
+ __func__, ac->id, dom);
goto put_dev_and_continue;
}
/* config state change */
@@ -1912,8 +1951,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
aq->last_err_rc = AP_RESPONSE_DECONFIGURED;
}
spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev config off\n",
- __func__, ac->id, dom);
+ pr_debug("%s(%d,%d) queue dev config off\n",
+ __func__, ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
/* 'receive' pending messages with -EAGAIN */
ap_flush_queue(aq);
@@ -1924,8 +1963,8 @@ static inline void ap_scan_domains(struct ap_card *ac)
if (aq->dev_state > AP_DEV_STATE_UNINITIATED)
_ap_queue_init_state(aq);
spin_unlock_bh(&aq->lock);
- AP_DBF_DBG("%s(%d,%d) queue dev config on\n",
- __func__, ac->id, dom);
+ pr_debug("%s(%d,%d) queue dev config on\n",
+ __func__, ac->id, dom);
ap_send_config_uevent(&aq->ap_dev, aq->config);
goto put_dev_and_continue;
}
@@ -1997,8 +2036,8 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
- AP_DBF_DBG("%s(%d) no type info (no APQN found), ignored\n",
- __func__, ap);
+ pr_debug("%s(%d) no type info (no APQN found), ignored\n",
+ __func__, ap);
}
return;
}
@@ -2010,8 +2049,8 @@ static inline void ap_scan_adapter(int ap)
ap_scan_rm_card_dev_and_queue_devs(ac);
put_device(dev);
} else {
- AP_DBF_DBG("%s(%d) no valid type (0) info, ignored\n",
- __func__, ap);
+ pr_debug("%s(%d) no valid type (0) info, ignored\n",
+ __func__, ap);
}
return;
}
@@ -2135,23 +2174,80 @@ static bool ap_get_configuration(void)
sizeof(struct ap_config_info)) != 0;
}
+/*
+ * ap_config_has_new_aps - Check current against old qci info whether
+ * new adapters have appeared. Returns true if at least one new
+ * adapter in the apm mask is showing up. Existing adapters or
+ * receding adapters are not counted.
+ */
+static bool ap_config_has_new_aps(void)
+{
+
+ unsigned long m[BITS_TO_LONGS(AP_DEVICES)];
+
+ if (!ap_qci_info)
+ return false;
+
+ bitmap_andnot(m, (unsigned long *)ap_qci_info->apm,
+ (unsigned long *)ap_qci_info_old->apm, AP_DEVICES);
+ if (!bitmap_empty(m, AP_DEVICES))
+ return true;
+
+ return false;
+}
+
+/*
+ * ap_config_has_new_doms - Check current against old qci info whether
+ * new (usage) domains have appeared. Returns true if at least one
+ * new domain in the aqm mask is showing up. Existing domains or
+ * receding domains are not counted.
+ */
+static bool ap_config_has_new_doms(void)
+{
+ unsigned long m[BITS_TO_LONGS(AP_DOMAINS)];
+
+ if (!ap_qci_info)
+ return false;
+
+ bitmap_andnot(m, (unsigned long *)ap_qci_info->aqm,
+ (unsigned long *)ap_qci_info_old->aqm, AP_DOMAINS);
+ if (!bitmap_empty(m, AP_DOMAINS))
+ return true;
+
+ return false;
+}
+
/**
* ap_scan_bus(): Scan the AP bus for new devices
- * Runs periodically, workqueue timer (ap_config_time)
- * @unused: Unused pointer.
+ * Always run under mutex ap_scan_bus_mutex protection
+ * which needs to get locked/unlocked by the caller!
+ * Returns true if any config change has been detected
+ * during the scan, otherwise false.
*/
-static void ap_scan_bus(struct work_struct *unused)
+static bool ap_scan_bus(void)
{
- int ap, config_changed = 0;
+ bool config_changed;
+ int ap;
- /* config change notify */
+ pr_debug(">%s\n", __func__);
+
+ /* (re-)fetch configuration via QCI */
config_changed = ap_get_configuration();
- if (config_changed)
+ if (config_changed) {
+ if (ap_config_has_new_aps() || ap_config_has_new_doms()) {
+ /*
+ * The appearance of new adapters and/or domains requires
+ * building new ap devices which need to get bound to a
+ * device driver. Thus reset the APQN bindings complete
+ * completion.
+ */
+ reinit_completion(&ap_apqn_bindings_complete);
+ }
+ /* post a config change notify */
notify_config_changed();
+ }
ap_select_domain();
- AP_DBF_DBG("%s running\n", __func__);
-
/* loop over all possible adapters */
for (ap = 0; ap <= ap_max_adapter_id; ap++)
ap_scan_adapter(ap);
@@ -2174,23 +2270,56 @@ static void ap_scan_bus(struct work_struct *unused)
}
if (atomic64_inc_return(&ap_scan_bus_count) == 1) {
- AP_DBF_DBG("%s init scan complete\n", __func__);
+ pr_debug("%s init scan complete\n", __func__);
ap_send_init_scan_done_uevent();
- ap_check_bindings_complete();
}
- mod_timer(&ap_config_timer, jiffies + ap_config_time * HZ);
+ ap_check_bindings_complete();
+
+ mod_timer(&ap_scan_bus_timer, jiffies + ap_scan_bus_time * HZ);
+
+ pr_debug("<%s config_changed=%d\n", __func__, config_changed);
+
+ return config_changed;
+}
+
+/*
+ * Callback for the ap_scan_bus_timer
+ * Runs periodically, workqueue timer (ap_scan_bus_time)
+ */
+static void ap_scan_bus_timer_callback(struct timer_list *unused)
+{
+ /*
+ * schedule work into the system long wq which, when
+ * finally executed, calls the AP bus scan.
+ */
+ queue_work(system_long_wq, &ap_scan_bus_work);
}
-static void ap_config_timeout(struct timer_list *unused)
+/*
+ * Callback for the ap_scan_bus_work
+ */
+static void ap_scan_bus_wq_callback(struct work_struct *unused)
{
- queue_work(system_long_wq, &ap_scan_work);
+ /*
+ * Try to invoke an ap_scan_bus(). If the mutex acquisition
+ * fails, another task is already running the AP bus scan
+ * and there is no need to wait and re-trigger the scan
+ * again. Please note that at the end of ap_scan_bus() the
+ * AP bus scan timer is re-armed, which then triggers the
+ * ap_scan_bus_timer_callback which enqueues work into the
+ * system_long_wq and thus invokes this function here again.
+ */
+ if (mutex_trylock(&ap_scan_bus_mutex)) {
+ ap_scan_bus_result = ap_scan_bus();
+ mutex_unlock(&ap_scan_bus_mutex);
+ }
}
static int __init ap_debug_init(void)
{
ap_dbf_info = debug_register("ap", 2, 1,
- DBF_MAX_SPRINTF_ARGS * sizeof(long));
+ AP_DBF_MAX_SPRINTF_ARGS * sizeof(long));
debug_register_view(ap_dbf_info, &debug_sprintf_view);
debug_set_level(ap_dbf_info, DBF_ERR);
@@ -2274,7 +2403,7 @@ static int __init ap_module_init(void)
ap_root_device->bus = &ap_bus_type;
/* Setup the AP bus rescan timer. */
- timer_setup(&ap_config_timer, ap_config_timeout, 0);
+ timer_setup(&ap_scan_bus_timer, ap_scan_bus_timer_callback, 0);
/*
* Setup the high resolution poll timer.
@@ -2292,7 +2421,7 @@ static int __init ap_module_init(void)
goto out_work;
}
- queue_work(system_long_wq, &ap_scan_work);
+ queue_work(system_long_wq, &ap_scan_bus_work);
return 0;
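
[Not part of the patch] ap_bus_force_rescan() is now synchronous and reports whether the scan actually changed the AP configuration. A hedged caller sketch; the surrounding policy is invented, only the bool return value comes from the patch:

	#include "ap_bus.h"

	/* hypothetical: a driver noticed its queue disappeared and asks the
	 * AP bus for a rescan before giving up on the device */
	static int example_handle_missing_queue(void)
	{
		if (!ap_bus_force_rescan())
			return -ENODEV;	/* nothing changed, the queue really is gone */

		return 0;		/* config changed, caller may retry its lookup */
	}
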
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index 98814839ef..59c7ed49aa 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -266,7 +266,7 @@ int ap_sb_available(void);
bool ap_is_se_guest(void);
void ap_wait(enum ap_sm_wait wait);
void ap_request_timeout(struct timer_list *t);
-void ap_bus_force_rescan(void);
+bool ap_bus_force_rescan(void);
int ap_test_config_usage_domain(unsigned int domain);
int ap_test_config_ctrl_domain(unsigned int domain);
@@ -352,8 +352,12 @@ int ap_parse_mask_str(const char *str,
* the return value is 0. If the timeout (in jiffies) hits instead
* -ETIME is returned. On failures negative return values are
* returned to the caller.
+ * It may be that the AP bus scan finds new devices. Then the
+ * condition that all APQNs are bound to their device drivers
+ * is reset to false and this call again blocks until either all
+ * APQNs are bound to a device driver or the timeout hits again.
*/
-int ap_wait_init_apqn_bindings_complete(unsigned long timeout);
+int ap_wait_apqn_bindings_complete(unsigned long timeout);
void ap_send_config_uevent(struct ap_device *ap_dev, bool cfg);
void ap_send_online_uevent(struct ap_device *ap_dev, int online);
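
[Not part of the patch] Because the bindings-complete condition can now be re-armed after every scan that adds devices, callers normally wait with a timeout. A minimal sketch; the 30 second value is arbitrary:

	#include <linux/jiffies.h>
	#include <linux/printk.h>
	#include "ap_bus.h"

	static int example_wait_for_apqns(void)
	{
		int rc;

		/* block until all known APQNs are bound to a driver, at most 30 s */
		rc = ap_wait_apqn_bindings_complete(msecs_to_jiffies(30000));
		if (rc == -ETIME)
			pr_warn("AP queue bindings still incomplete after 30 s\n");
		return rc;
	}
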
diff --git a/drivers/s390/crypto/ap_debug.h b/drivers/s390/crypto/ap_debug.h
index c083ce88a9..2f66271b85 100644
--- a/drivers/s390/crypto/ap_debug.h
+++ b/drivers/s390/crypto/ap_debug.h
@@ -16,7 +16,7 @@
#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
-#define DBF_MAX_SPRINTF_ARGS 6
+#define AP_DBF_MAX_SPRINTF_ARGS 6
#define AP_DBF(...) \
debug_sprintf_event(ap_dbf_info, ##__VA_ARGS__)
@@ -26,8 +26,6 @@
debug_sprintf_event(ap_dbf_info, DBF_WARN, ##__VA_ARGS__)
#define AP_DBF_INFO(...) \
debug_sprintf_event(ap_dbf_info, DBF_INFO, ##__VA_ARGS__)
-#define AP_DBF_DBG(...) \
- debug_sprintf_event(ap_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
extern debug_info_t *ap_dbf_info;
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 6825954431..6e4e8d324a 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -136,6 +136,8 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
switch (status.response_code) {
case AP_RESPONSE_NORMAL:
+ print_hex_dump_debug("aprpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ aq->reply->msg, aq->reply->len, false);
aq->queue_count = max_t(int, 0, aq->queue_count - 1);
if (!status.queue_empty && !aq->queue_count)
aq->queue_count++;
@@ -169,6 +171,9 @@ static struct ap_queue_status ap_sm_recv(struct ap_queue *aq)
aq->queue_count = 0;
list_splice_init(&aq->pendingq, &aq->requestq);
aq->requestq_count += aq->pendingq_count;
+ pr_debug("%s queue 0x%02x.%04x rescheduled %d reqs (new req %d)\n",
+ __func__, AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid),
+ aq->pendingq_count, aq->requestq_count);
aq->pendingq_count = 0;
break;
default:
@@ -243,6 +248,8 @@ static enum ap_sm_wait ap_sm_write(struct ap_queue *aq)
/* Start the next request on the queue. */
ap_msg = list_entry(aq->requestq.next, struct ap_message, list);
+ print_hex_dump_debug("apreq: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg->msg, ap_msg->len, false);
status = __ap_send(qid, ap_msg->psmid,
ap_msg->msg, ap_msg->len,
ap_msg->flags & AP_MSG_FLAG_SPECIAL);
@@ -446,9 +453,9 @@ static enum ap_sm_wait ap_sm_assoc_wait(struct ap_queue *aq)
case AP_BS_Q_USABLE:
/* association is through */
aq->sm_state = AP_SM_STATE_IDLE;
- AP_DBF_DBG("%s queue 0x%02x.%04x associated with %u\n",
- __func__, AP_QID_CARD(aq->qid),
- AP_QID_QUEUE(aq->qid), aq->assoc_idx);
+ pr_debug("%s queue 0x%02x.%04x associated with %u\n",
+ __func__, AP_QID_CARD(aq->qid),
+ AP_QID_QUEUE(aq->qid), aq->assoc_idx);
return AP_SM_WAIT_NONE;
case AP_BS_Q_USABLE_NO_SECURE_KEY:
/* association still pending */
@@ -690,9 +697,9 @@ static ssize_t ap_functions_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
- AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
- __func__, status.response_code,
- AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
@@ -846,9 +853,9 @@ static ssize_t se_bind_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
- AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
- __func__, status.response_code,
- AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
@@ -974,9 +981,9 @@ static ssize_t se_associate_show(struct device *dev,
status = ap_test_queue(aq->qid, 1, &hwinfo);
if (status.response_code > AP_RESPONSE_BUSY) {
- AP_DBF_DBG("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
- __func__, status.response_code,
- AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
+ pr_debug("%s RC 0x%02x on tapq(0x%02x.%04x)\n",
+ __func__, status.response_code,
+ AP_QID_CARD(aq->qid), AP_QID_QUEUE(aq->qid));
return -EIO;
}
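
[Not part of the patch] The new request/reply dumps above use the generic print_hex_dump_debug() helper, which only emits output when DEBUG or dynamic debug is enabled for the file. The call pattern in isolation:

	#include <linux/printk.h>

	static void example_dump_ap_msg(const void *msg, size_t len)
	{
		/* 16 bytes per line, 1-byte groups, no trailing ASCII column */
		print_hex_dump_debug("apmsg: ", DUMP_PREFIX_ADDRESS, 16, 1,
				     msg, len, false);
	}
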
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 6cfb6b2340..dccf664a3d 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -42,24 +42,23 @@ MODULE_DESCRIPTION("s390 protected key interface");
* debug feature data and functions
*/
-static debug_info_t *debug_info;
+static debug_info_t *pkey_dbf_info;
-#define DEBUG_DBG(...) debug_sprintf_event(debug_info, 6, ##__VA_ARGS__)
-#define DEBUG_INFO(...) debug_sprintf_event(debug_info, 5, ##__VA_ARGS__)
-#define DEBUG_WARN(...) debug_sprintf_event(debug_info, 4, ##__VA_ARGS__)
-#define DEBUG_ERR(...) debug_sprintf_event(debug_info, 3, ##__VA_ARGS__)
+#define PKEY_DBF_INFO(...) debug_sprintf_event(pkey_dbf_info, 5, ##__VA_ARGS__)
+#define PKEY_DBF_WARN(...) debug_sprintf_event(pkey_dbf_info, 4, ##__VA_ARGS__)
+#define PKEY_DBF_ERR(...) debug_sprintf_event(pkey_dbf_info, 3, ##__VA_ARGS__)
static void __init pkey_debug_init(void)
{
/* 5 arguments per dbf entry (including the format string ptr) */
- debug_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
- debug_register_view(debug_info, &debug_sprintf_view);
- debug_set_level(debug_info, 3);
+ pkey_dbf_info = debug_register("pkey", 1, 1, 5 * sizeof(long));
+ debug_register_view(pkey_dbf_info, &debug_sprintf_view);
+ debug_set_level(pkey_dbf_info, 3);
}
static void __exit pkey_debug_exit(void)
{
- debug_unregister(debug_info);
+ debug_unregister(pkey_dbf_info);
}
/* inside view of a protected key token (only type 0x00 version 0x01) */
@@ -163,14 +162,14 @@ static int pkey_clr2protkey(u32 keytype, const u8 *clrkey,
fc = CPACF_PCKMO_ENC_ECC_ED448_KEY;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keytype %u\n",
- __func__, keytype);
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
+ __func__, keytype);
return -EINVAL;
}
if (*protkeylen < keysize + AES_WK_VP_SIZE) {
- DEBUG_ERR("%s prot key buffer size too small: %u < %d\n",
- __func__, *protkeylen, keysize + AES_WK_VP_SIZE);
+ PKEY_DBF_ERR("%s prot key buffer size too small: %u < %d\n",
+ __func__, *protkeylen, keysize + AES_WK_VP_SIZE);
return -EINVAL;
}
@@ -182,7 +181,7 @@ static int pkey_clr2protkey(u32 keytype, const u8 *clrkey,
}
/* check for the pckmo subfunction we need now */
if (!cpacf_test_func(&pckmo_functions, fc)) {
- DEBUG_ERR("%s pckmo functions not available\n", __func__);
+ PKEY_DBF_ERR("%s pckmo functions not available\n", __func__);
return -ENODEV;
}
@@ -244,7 +243,7 @@ static int pkey_skey2pkey(const u8 *key, u8 *protkey,
}
if (rc)
- DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+ pr_debug("%s failed rc=%d\n", __func__, rc);
return rc;
}
@@ -283,7 +282,7 @@ static int pkey_clr2ep11key(const u8 *clrkey, size_t clrkeylen,
out:
kfree(apqns);
if (rc)
- DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+ pr_debug("%s failed rc=%d\n", __func__, rc);
return rc;
}
@@ -294,33 +293,36 @@ static int pkey_ep11key2pkey(const u8 *key, size_t keylen,
u8 *protkey, u32 *protkeylen, u32 *protkeytype)
{
u32 nr_apqns, *apqns = NULL;
+ int i, j, rc = -ENODEV;
u16 card, dom;
- int i, rc;
zcrypt_wait_api_operational();
- /* build a list of apqns suitable for this key */
- rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
- ZCRYPT_CEX7,
- ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
- ep11_kb_wkvp(key, keylen));
- if (rc)
- goto out;
+ /* try two times in case of failure */
+ for (i = 0; i < 2 && rc; i++) {
- /* go through the list of apqns and try to derive an pkey */
- for (rc = -ENODEV, i = 0; i < nr_apqns; i++) {
- card = apqns[i] >> 16;
- dom = apqns[i] & 0xFFFF;
- rc = ep11_kblob2protkey(card, dom, key, keylen,
- protkey, protkeylen, protkeytype);
- if (rc == 0)
- break;
+ /* build a list of apqns suitable for this key */
+ rc = ep11_findcard2(&apqns, &nr_apqns, 0xFFFF, 0xFFFF,
+ ZCRYPT_CEX7,
+ ap_is_se_guest() ? EP11_API_V6 : EP11_API_V4,
+ ep11_kb_wkvp(key, keylen));
+ if (rc)
+ continue; /* retry findcard on failure */
+
+ /* go through the list of apqns and try to derive a pkey */
+ for (rc = -ENODEV, j = 0; j < nr_apqns && rc; j++) {
+ card = apqns[j] >> 16;
+ dom = apqns[j] & 0xFFFF;
+ rc = ep11_kblob2protkey(card, dom, key, keylen,
+ protkey, protkeylen, protkeytype);
+ }
+
+ kfree(apqns);
}
-out:
- kfree(apqns);
if (rc)
- DEBUG_DBG("%s failed rc=%d\n", __func__, rc);
+ pr_debug("%s failed rc=%d\n", __func__, rc);
+
return rc;
}
@@ -336,7 +338,7 @@ static int pkey_verifykey(const struct pkey_seckey *seckey,
int rc;
/* check the secure key for valid AES secure key */
- rc = cca_check_secaeskeytoken(debug_info, 3, (u8 *)seckey, 0);
+ rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, (u8 *)seckey, 0);
if (rc)
goto out;
if (pattributes)
@@ -351,7 +353,7 @@ static int pkey_verifykey(const struct pkey_seckey *seckey,
if (rc > 0) {
/* key mkvp matches to old master key mkvp */
- DEBUG_DBG("%s secure key has old mkvp\n", __func__);
+ pr_debug("%s secure key has old mkvp\n", __func__);
if (pattributes)
*pattributes |= PKEY_VERIFY_ATTR_OLD_MKVP;
rc = 0;
@@ -363,7 +365,7 @@ static int pkey_verifykey(const struct pkey_seckey *seckey,
*pdomain = domain;
out:
- DEBUG_DBG("%s rc=%d\n", __func__, rc);
+ pr_debug("%s rc=%d\n", __func__, rc);
return rc;
}
@@ -379,8 +381,8 @@ static int pkey_genprotkey(u32 keytype, u8 *protkey,
keysize = pkey_keytype_aes_to_size(keytype);
if (!keysize) {
- DEBUG_ERR("%s unknown/unsupported keytype %d\n", __func__,
- keytype);
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %d\n", __func__,
+ keytype);
return -EINVAL;
}
@@ -428,13 +430,13 @@ static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen,
fc = CPACF_KMC_PAES_256;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keytype %u\n", __func__,
- protkeytype);
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n", __func__,
+ protkeytype);
return -EINVAL;
}
if (protkeylen != pkeylen) {
- DEBUG_ERR("%s invalid protected key size %u for keytype %u\n",
- __func__, protkeylen, protkeytype);
+ PKEY_DBF_ERR("%s invalid protected key size %u for keytype %u\n",
+ __func__, protkeylen, protkeytype);
return -EINVAL;
}
@@ -446,7 +448,7 @@ static int pkey_verifyprotkey(const u8 *protkey, u32 protkeylen,
k = cpacf_kmc(fc | CPACF_ENCRYPT, &param, null_msg, dest_buf,
sizeof(null_msg));
if (k != sizeof(null_msg)) {
- DEBUG_ERR("%s protected key is not valid\n", __func__);
+ PKEY_DBF_ERR("%s protected key is not valid\n", __func__);
return -EKEYREJECTED;
}
@@ -464,13 +466,13 @@ static int nonccatokaes2pkey(const struct clearkeytoken *t,
keysize = pkey_keytype_aes_to_size(t->keytype);
if (!keysize) {
- DEBUG_ERR("%s unknown/unsupported keytype %u\n",
- __func__, t->keytype);
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
+ __func__, t->keytype);
return -EINVAL;
}
if (t->len != keysize) {
- DEBUG_ERR("%s non clear key aes token: invalid key len %u\n",
- __func__, t->len);
+ PKEY_DBF_ERR("%s non clear key aes token: invalid key len %u\n",
+ __func__, t->len);
return -EINVAL;
}
@@ -505,7 +507,7 @@ try_via_ep11:
goto out;
failure:
- DEBUG_ERR("%s unable to build protected key from clear", __func__);
+ PKEY_DBF_ERR("%s unable to build protected key from clear", __func__);
out:
kfree(tmpbuf);
@@ -536,14 +538,14 @@ static int nonccatokecc2pkey(const struct clearkeytoken *t,
keylen = 64;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keytype %u\n",
- __func__, t->keytype);
+ PKEY_DBF_ERR("%s unknown/unsupported keytype %u\n",
+ __func__, t->keytype);
return -EINVAL;
}
if (t->len != keylen) {
- DEBUG_ERR("%s non clear key ecc token: invalid key len %u\n",
- __func__, t->len);
+ PKEY_DBF_ERR("%s non clear key ecc token: invalid key len %u\n",
+ __func__, t->len);
return -EINVAL;
}
@@ -551,8 +553,8 @@ static int nonccatokecc2pkey(const struct clearkeytoken *t,
rc = pkey_clr2protkey(t->keytype, t->clearkey,
protkey, protkeylen, protkeytype);
if (rc) {
- DEBUG_ERR("%s unable to build protected key from clear",
- __func__);
+ PKEY_DBF_ERR("%s unable to build protected key from clear",
+ __func__);
}
return rc;
@@ -604,15 +606,15 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
protkeylen, protkeytype);
break;
default:
- DEBUG_ERR("%s unknown/unsupported non cca clear key type %u\n",
- __func__, t->keytype);
+ PKEY_DBF_ERR("%s unknown/unsupported non cca clear key type %u\n",
+ __func__, t->keytype);
return -EINVAL;
}
break;
}
case TOKVER_EP11_AES: {
/* check ep11 key for exportable as protected key */
- rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
+ rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1);
if (rc)
goto out;
rc = pkey_ep11key2pkey(key, keylen,
@@ -621,15 +623,16 @@ static int pkey_nonccatok2pkey(const u8 *key, u32 keylen,
}
case TOKVER_EP11_AES_WITH_HEADER:
/* check ep11 key with header for exportable as protected key */
- rc = ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1);
+ rc = ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1);
if (rc)
goto out;
rc = pkey_ep11key2pkey(key, keylen,
protkey, protkeylen, protkeytype);
break;
default:
- DEBUG_ERR("%s unknown/unsupported non-CCA token version %d\n",
- __func__, hdr->version);
+ PKEY_DBF_ERR("%s unknown/unsupported non-CCA token version %d\n",
+ __func__, hdr->version);
}
out:
@@ -654,8 +657,8 @@ static int pkey_ccainttok2pkey(const u8 *key, u32 keylen,
return -EINVAL;
break;
default:
- DEBUG_ERR("%s unknown/unsupported CCA internal token version %d\n",
- __func__, hdr->version);
+ PKEY_DBF_ERR("%s unknown/unsupported CCA internal token version %d\n",
+ __func__, hdr->version);
return -EINVAL;
}
@@ -672,7 +675,7 @@ int pkey_keyblob2pkey(const u8 *key, u32 keylen,
int rc;
if (keylen < sizeof(struct keytoken_header)) {
- DEBUG_ERR("%s invalid keylen %d\n", __func__, keylen);
+ PKEY_DBF_ERR("%s invalid keylen %d\n", __func__, keylen);
return -EINVAL;
}
@@ -686,12 +689,12 @@ int pkey_keyblob2pkey(const u8 *key, u32 keylen,
protkey, protkeylen, protkeytype);
break;
default:
- DEBUG_ERR("%s unknown/unsupported blob type %d\n",
- __func__, hdr->type);
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
return -EINVAL;
}
- DEBUG_DBG("%s rc=%d\n", __func__, rc);
+ pr_debug("%s rc=%d\n", __func__, rc);
return rc;
}
EXPORT_SYMBOL(pkey_keyblob2pkey);
@@ -839,7 +842,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
hdr->version == TOKVER_CCA_AES) {
struct secaeskeytoken *t = (struct secaeskeytoken *)key;
- rc = cca_check_secaeskeytoken(debug_info, 3, key, 0);
+ rc = cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0);
if (rc)
goto out;
if (ktype)
@@ -869,7 +872,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
hdr->version == TOKVER_CCA_VLSC) {
struct cipherkeytoken *t = (struct cipherkeytoken *)key;
- rc = cca_check_secaescipherkey(debug_info, 3, key, 0, 1);
+ rc = cca_check_secaescipherkey(pkey_dbf_info, 3, key, 0, 1);
if (rc)
goto out;
if (ktype)
@@ -907,7 +910,7 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
struct ep11keyblob *kb = (struct ep11keyblob *)key;
int api;
- rc = ep11_check_aes_key(debug_info, 3, key, keylen, 1);
+ rc = ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1);
if (rc)
goto out;
if (ktype)
@@ -933,8 +936,8 @@ static int pkey_verifykey2(const u8 *key, size_t keylen,
struct ep11kblob_header *kh = (struct ep11kblob_header *)key;
int api;
- rc = ep11_check_aes_key_with_hdr(debug_info, 3,
- key, keylen, 1);
+ rc = ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1);
if (rc)
goto out;
if (ktype)
@@ -981,25 +984,27 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
if (hdr->version == TOKVER_CCA_AES) {
if (keylen != sizeof(struct secaeskeytoken))
return -EINVAL;
- if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
+ if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0))
return -EINVAL;
} else if (hdr->version == TOKVER_CCA_VLSC) {
if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
return -EINVAL;
- if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
+ if (cca_check_secaescipherkey(pkey_dbf_info,
+ 3, key, 0, 1))
return -EINVAL;
} else {
- DEBUG_ERR("%s unknown CCA internal token version %d\n",
- __func__, hdr->version);
+ PKEY_DBF_ERR("%s unknown CCA internal token version %d\n",
+ __func__, hdr->version);
return -EINVAL;
}
} else if (hdr->type == TOKTYPE_NON_CCA) {
if (hdr->version == TOKVER_EP11_AES) {
- if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
+ if (ep11_check_aes_key(pkey_dbf_info,
+ 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->version == TOKVER_EP11_AES_WITH_HEADER) {
- if (ep11_check_aes_key_with_hdr(debug_info, 3,
- key, keylen, 1))
+ if (ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1))
return -EINVAL;
} else {
return pkey_nonccatok2pkey(key, keylen,
@@ -1007,8 +1012,8 @@ static int pkey_keyblob2pkey2(const struct pkey_apqn *apqns, size_t nr_apqns,
protkeytype);
}
} else {
- DEBUG_ERR("%s unknown/unsupported blob type %d\n",
- __func__, hdr->type);
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
return -EINVAL;
}
@@ -1234,50 +1239,53 @@ static int pkey_keyblob2pkey3(const struct pkey_apqn *apqns, size_t nr_apqns,
hdr->version == TOKVER_EP11_AES_WITH_HEADER &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
/* EP11 AES key blob with header */
- if (ep11_check_aes_key_with_hdr(debug_info, 3, key, keylen, 1))
+ if (ep11_check_aes_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_ECC_WITH_HEADER &&
is_ep11_keyblob(key + sizeof(struct ep11kblob_header))) {
/* EP11 ECC key blob with header */
- if (ep11_check_ecc_key_with_hdr(debug_info, 3, key, keylen, 1))
+ if (ep11_check_ecc_key_with_hdr(pkey_dbf_info,
+ 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_NON_CCA &&
hdr->version == TOKVER_EP11_AES &&
is_ep11_keyblob(key)) {
/* EP11 AES key blob with header in session field */
- if (ep11_check_aes_key(debug_info, 3, key, keylen, 1))
+ if (ep11_check_aes_key(pkey_dbf_info, 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_CCA_INTERNAL) {
if (hdr->version == TOKVER_CCA_AES) {
/* CCA AES data key */
if (keylen != sizeof(struct secaeskeytoken))
return -EINVAL;
- if (cca_check_secaeskeytoken(debug_info, 3, key, 0))
+ if (cca_check_secaeskeytoken(pkey_dbf_info, 3, key, 0))
return -EINVAL;
} else if (hdr->version == TOKVER_CCA_VLSC) {
/* CCA AES cipher key */
if (keylen < hdr->len || keylen > MAXCCAVLSCTOKENSIZE)
return -EINVAL;
- if (cca_check_secaescipherkey(debug_info, 3, key, 0, 1))
+ if (cca_check_secaescipherkey(pkey_dbf_info,
+ 3, key, 0, 1))
return -EINVAL;
} else {
- DEBUG_ERR("%s unknown CCA internal token version %d\n",
- __func__, hdr->version);
+ PKEY_DBF_ERR("%s unknown CCA internal token version %d\n",
+ __func__, hdr->version);
return -EINVAL;
}
} else if (hdr->type == TOKTYPE_CCA_INTERNAL_PKA) {
/* CCA ECC (private) key */
if (keylen < sizeof(struct eccprivkeytoken))
return -EINVAL;
- if (cca_check_sececckeytoken(debug_info, 3, key, keylen, 1))
+ if (cca_check_sececckeytoken(pkey_dbf_info, 3, key, keylen, 1))
return -EINVAL;
} else if (hdr->type == TOKTYPE_NON_CCA) {
return pkey_nonccatok2pkey(key, keylen,
protkey, protkeylen, protkeytype);
} else {
- DEBUG_ERR("%s unknown/unsupported blob type %d\n",
- __func__, hdr->type);
+ PKEY_DBF_ERR("%s unknown/unsupported blob type %d\n",
+ __func__, hdr->type);
return -EINVAL;
}
@@ -1350,7 +1358,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = cca_genseckey(kgs.cardnr, kgs.domain,
kgs.keytype, kgs.seckey.seckey);
- DEBUG_DBG("%s cca_genseckey()=%d\n", __func__, rc);
+ pr_debug("%s cca_genseckey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ugs, &kgs, sizeof(kgs)))
@@ -1365,7 +1373,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = cca_clr2seckey(kcs.cardnr, kcs.domain, kcs.keytype,
kcs.clrkey.clrkey, kcs.seckey.seckey);
- DEBUG_DBG("%s cca_clr2seckey()=%d\n", __func__, rc);
+ pr_debug("%s cca_clr2seckey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ucs, &kcs, sizeof(kcs)))
@@ -1383,7 +1391,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = cca_sec2protkey(ksp.cardnr, ksp.domain,
ksp.seckey.seckey, ksp.protkey.protkey,
&ksp.protkey.len, &ksp.protkey.type);
- DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc);
+ pr_debug("%s cca_sec2protkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(usp, &ksp, sizeof(ksp)))
@@ -1400,7 +1408,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = pkey_clr2protkey(kcp.keytype, kcp.clrkey.clrkey,
kcp.protkey.protkey,
&kcp.protkey.len, &kcp.protkey.type);
- DEBUG_DBG("%s pkey_clr2protkey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_clr2protkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ucp, &kcp, sizeof(kcp)))
@@ -1416,7 +1424,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = cca_findcard(kfc.seckey.seckey,
&kfc.cardnr, &kfc.domain, 1);
- DEBUG_DBG("%s cca_findcard()=%d\n", __func__, rc);
+ pr_debug("%s cca_findcard()=%d\n", __func__, rc);
if (rc < 0)
break;
if (copy_to_user(ufc, &kfc, sizeof(kfc)))
@@ -1432,7 +1440,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
ksp.protkey.len = sizeof(ksp.protkey.protkey);
rc = pkey_skey2pkey(ksp.seckey.seckey, ksp.protkey.protkey,
&ksp.protkey.len, &ksp.protkey.type);
- DEBUG_DBG("%s pkey_skey2pkey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_skey2pkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(usp, &ksp, sizeof(ksp)))
@@ -1447,7 +1455,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = pkey_verifykey(&kvk.seckey, &kvk.cardnr, &kvk.domain,
&kvk.keysize, &kvk.attributes);
- DEBUG_DBG("%s pkey_verifykey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_verifykey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(uvk, &kvk, sizeof(kvk)))
@@ -1463,7 +1471,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
kgp.protkey.len = sizeof(kgp.protkey.protkey);
rc = pkey_genprotkey(kgp.keytype, kgp.protkey.protkey,
&kgp.protkey.len, &kgp.protkey.type);
- DEBUG_DBG("%s pkey_genprotkey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_genprotkey()=%d\n", __func__, rc);
if (rc)
break;
if (copy_to_user(ugp, &kgp, sizeof(kgp)))
@@ -1478,7 +1486,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = pkey_verifyprotkey(kvp.protkey.protkey,
kvp.protkey.len, kvp.protkey.type);
- DEBUG_DBG("%s pkey_verifyprotkey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_verifyprotkey()=%d\n", __func__, rc);
break;
}
case PKEY_KBLOB2PROTK: {
@@ -1494,7 +1502,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
ktp.protkey.len = sizeof(ktp.protkey.protkey);
rc = pkey_keyblob2pkey(kkey, ktp.keylen, ktp.protkey.protkey,
&ktp.protkey.len, &ktp.protkey.type);
- DEBUG_DBG("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
+ pr_debug("%s pkey_keyblob2pkey()=%d\n", __func__, rc);
memzero_explicit(kkey, ktp.keylen);
kfree(kkey);
if (rc)
@@ -1523,7 +1531,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = pkey_genseckey2(apqns, kgs.apqn_entries,
kgs.type, kgs.size, kgs.keygenflags,
kkey, &klen);
- DEBUG_DBG("%s pkey_genseckey2()=%d\n", __func__, rc);
+ pr_debug("%s pkey_genseckey2()=%d\n", __func__, rc);
kfree(apqns);
if (rc) {
kfree(kkey);
@@ -1565,7 +1573,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = pkey_clr2seckey2(apqns, kcs.apqn_entries,
kcs.type, kcs.size, kcs.keygenflags,
kcs.clrkey.clrkey, kkey, &klen);
- DEBUG_DBG("%s pkey_clr2seckey2()=%d\n", __func__, rc);
+ pr_debug("%s pkey_clr2seckey2()=%d\n", __func__, rc);
kfree(apqns);
if (rc) {
kfree(kkey);
@@ -1601,7 +1609,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = pkey_verifykey2(kkey, kvk.keylen,
&kvk.cardnr, &kvk.domain,
&kvk.type, &kvk.size, &kvk.flags);
- DEBUG_DBG("%s pkey_verifykey2()=%d\n", __func__, rc);
+ pr_debug("%s pkey_verifykey2()=%d\n", __func__, rc);
kfree(kkey);
if (rc)
break;
@@ -1630,7 +1638,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
kkey, ktp.keylen,
ktp.protkey.protkey, &ktp.protkey.len,
&ktp.protkey.type);
- DEBUG_DBG("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
+ pr_debug("%s pkey_keyblob2pkey2()=%d\n", __func__, rc);
kfree(apqns);
memzero_explicit(kkey, ktp.keylen);
kfree(kkey);
@@ -1664,7 +1672,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
}
rc = pkey_apqns4key(kkey, kak.keylen, kak.flags,
apqns, &nr_apqns);
- DEBUG_DBG("%s pkey_apqns4key()=%d\n", __func__, rc);
+ pr_debug("%s pkey_apqns4key()=%d\n", __func__, rc);
kfree(kkey);
if (rc && rc != -ENOSPC) {
kfree(apqns);
@@ -1707,7 +1715,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
}
rc = pkey_apqns4keytype(kat.type, kat.cur_mkvp, kat.alt_mkvp,
kat.flags, apqns, &nr_apqns);
- DEBUG_DBG("%s pkey_apqns4keytype()=%d\n", __func__, rc);
+ pr_debug("%s pkey_apqns4keytype()=%d\n", __func__, rc);
if (rc && rc != -ENOSPC) {
kfree(apqns);
break;
@@ -1757,7 +1765,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
rc = pkey_keyblob2pkey3(apqns, ktp.apqn_entries,
kkey, ktp.keylen,
protkey, &protkeylen, &ktp.pkeytype);
- DEBUG_DBG("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
+ pr_debug("%s pkey_keyblob2pkey3()=%d\n", __func__, rc);
kfree(apqns);
memzero_explicit(kkey, ktp.keylen);
kfree(kkey);
diff --git a/drivers/s390/crypto/vfio_ap_drv.c b/drivers/s390/crypto/vfio_ap_drv.c
index a5ab03e42f..4aeb3e1213 100644
--- a/drivers/s390/crypto/vfio_ap_drv.c
+++ b/drivers/s390/crypto/vfio_ap_drv.c
@@ -60,7 +60,7 @@ static void vfio_ap_matrix_dev_release(struct device *dev)
kfree(matrix_dev);
}
-static struct bus_type matrix_bus = {
+static const struct bus_type matrix_bus = {
.name = "matrix",
};
diff --git a/drivers/s390/crypto/vfio_ap_ops.c b/drivers/s390/crypto/vfio_ap_ops.c
index 983b3b1619..fc169bc615 100644
--- a/drivers/s390/crypto/vfio_ap_ops.c
+++ b/drivers/s390/crypto/vfio_ap_ops.c
@@ -659,6 +659,21 @@ static bool vfio_ap_mdev_filter_cdoms(struct ap_matrix_mdev *matrix_mdev)
AP_DOMAINS);
}
+static bool _queue_passable(struct vfio_ap_queue *q)
+{
+ if (!q)
+ return false;
+
+ switch (q->reset_status.response_code) {
+ case AP_RESPONSE_NORMAL:
+ case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
+ return true;
+ default:
+ return false;
+ }
+}
+
/*
* vfio_ap_mdev_filter_matrix - filter the APQNs assigned to the matrix mdev
* to ensure no queue devices are passed through to
@@ -687,7 +702,6 @@ static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
unsigned long apid, apqi, apqn;
DECLARE_BITMAP(prev_shadow_apm, AP_DEVICES);
DECLARE_BITMAP(prev_shadow_aqm, AP_DOMAINS);
- struct vfio_ap_queue *q;
bitmap_copy(prev_shadow_apm, matrix_mdev->shadow_apcb.apm, AP_DEVICES);
bitmap_copy(prev_shadow_aqm, matrix_mdev->shadow_apcb.aqm, AP_DOMAINS);
@@ -716,8 +730,7 @@ static bool vfio_ap_mdev_filter_matrix(struct ap_matrix_mdev *matrix_mdev,
* hardware device.
*/
apqn = AP_MKQID(apid, apqi);
- q = vfio_ap_mdev_get_queue(matrix_mdev, apqn);
- if (!q || q->reset_status.response_code) {
+ if (!_queue_passable(vfio_ap_mdev_get_queue(matrix_mdev, apqn))) {
clear_bit_inv(apid, matrix_mdev->shadow_apcb.apm);
/*
@@ -1691,6 +1704,7 @@ static int apq_status_check(int apqn, struct ap_queue_status *status)
switch (status->response_code) {
case AP_RESPONSE_NORMAL:
case AP_RESPONSE_DECONFIGURED:
+ case AP_RESPONSE_CHECKSTOPPED:
return 0;
case AP_RESPONSE_RESET_IN_PROGRESS:
case AP_RESPONSE_BUSY:
@@ -1747,14 +1761,6 @@ static void apq_reset_check(struct work_struct *reset_work)
memcpy(&q->reset_status, &status, sizeof(status));
continue;
}
- /*
- * When an AP adapter is deconfigured, the
- * associated queues are reset, so let's set the
- * status response code to 0 so the queue may be
- * passed through (i.e., not filtered)
- */
- if (status.response_code == AP_RESPONSE_DECONFIGURED)
- q->reset_status.response_code = 0;
if (q->saved_isc != VFIO_AP_ISC_INVALID)
vfio_ap_free_aqic_resources(q);
break;
@@ -1781,12 +1787,7 @@ static void vfio_ap_mdev_reset_queue(struct vfio_ap_queue *q)
queue_work(system_long_wq, &q->reset_work);
break;
case AP_RESPONSE_DECONFIGURED:
- /*
- * When an AP adapter is deconfigured, the associated
- * queues are reset, so let's set the status response code to 0
- * so the queue may be passed through (i.e., not filtered).
- */
- q->reset_status.response_code = 0;
+ case AP_RESPONSE_CHECKSTOPPED:
vfio_ap_free_aqic_resources(q);
break;
default:
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index 53ddae5ad8..eba07f8ef3 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -12,6 +12,9 @@
* Multiple device nodes: Harald Freudenberger <freude@linux.ibm.com>
*/
+#define KMSG_COMPONENT "zcrypt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
#include <linux/module.h>
#include <linux/init.h>
#include <linux/interrupt.h>
@@ -57,10 +60,6 @@ DEFINE_SPINLOCK(zcrypt_list_lock);
LIST_HEAD(zcrypt_card_list);
static atomic_t zcrypt_open_count = ATOMIC_INIT(0);
-static atomic_t zcrypt_rescan_count = ATOMIC_INIT(0);
-
-atomic_t zcrypt_rescan_req = ATOMIC_INIT(0);
-EXPORT_SYMBOL(zcrypt_rescan_req);
static LIST_HEAD(zcrypt_ops_list);
@@ -69,20 +68,15 @@ debug_info_t *zcrypt_dbf_info;
/*
* Process a rescan of the transport layer.
- *
- * Returns 1, if the rescan has been processed, otherwise 0.
+ * Runs a synchronous AP bus rescan.
+ * Returns true if something has changed (for example the
+ * bus scan has found and built up new devices), in which
+ * case a retry is worthwhile. Otherwise false is returned,
+ * meaning nothing has changed on the AP bus level.
*/
-static inline int zcrypt_process_rescan(void)
-{
- if (atomic_read(&zcrypt_rescan_req)) {
- atomic_set(&zcrypt_rescan_req, 0);
- atomic_inc(&zcrypt_rescan_count);
- ap_bus_force_rescan();
- ZCRYPT_DBF_INFO("%s rescan count=%07d\n", __func__,
- atomic_inc_return(&zcrypt_rescan_count));
- return 1;
- }
- return 0;
+static inline bool zcrypt_process_rescan(void)
+{
+ return ap_bus_force_rescan();
}
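Every user-space entry point below now follows the same two-stage pattern around this helper; a condensed sketch for reference, distilled from the zcrypt_send_cprb() hunk further down (not additional code in the patch):

	struct zcrypt_track tr;
	int rc;

	memset(&tr, 0, sizeof(tr));

	/* first round: retry on EAGAIN up to TRACK_AGAIN_MAX times */
	do {
		rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb);
	} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	/* on ENODEV only: force a bus rescan and, if it changed anything,
	 * run one more round of retries */
	if (rc == -ENODEV && zcrypt_process_rescan())
		do {
			rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb);
		} while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);

	if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
		rc = -EIO;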
void zcrypt_msgtype_register(struct zcrypt_ops *zops)
@@ -113,7 +107,11 @@ EXPORT_SYMBOL(zcrypt_msgtype);
struct zcdn_device;
-static struct class *zcrypt_class;
+static void zcdn_device_release(struct device *dev);
+static const struct class zcrypt_class = {
+ .name = ZCRYPT_NAME,
+ .dev_release = zcdn_device_release,
+};
static dev_t zcrypt_devt;
static struct cdev zcrypt_cdev;
@@ -136,7 +134,7 @@ static int zcdn_destroy(const char *name);
*/
static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
{
- struct device *dev = class_find_device_by_name(zcrypt_class, name);
+ struct device *dev = class_find_device_by_name(&zcrypt_class, name);
return dev ? to_zcdn_dev(dev) : NULL;
}
@@ -148,7 +146,7 @@ static inline struct zcdn_device *find_zcdndev_by_name(const char *name)
*/
static inline struct zcdn_device *find_zcdndev_by_devt(dev_t devt)
{
- struct device *dev = class_find_device_by_devt(zcrypt_class, devt);
+ struct device *dev = class_find_device_by_devt(&zcrypt_class, devt);
return dev ? to_zcdn_dev(dev) : NULL;
}
@@ -402,7 +400,7 @@ static int zcdn_create(const char *name)
goto unlockout;
}
zcdndev->device.release = zcdn_device_release;
- zcdndev->device.class = zcrypt_class;
+ zcdndev->device.class = &zcrypt_class;
zcdndev->device.devt = devt;
zcdndev->device.groups = zcdn_dev_attr_groups;
if (name[0])
@@ -717,8 +715,7 @@ static long zcrypt_rsa_modexpo(struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
- __func__);
+ pr_debug("%s no matching queue found => ENODEV\n", __func__);
rc = -ENODEV;
goto out;
}
@@ -822,8 +819,7 @@ static long zcrypt_rsa_crt(struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
- __func__);
+ pr_debug("%s no matching queue found => ENODEV\n", __func__);
rc = -ENODEV;
goto out;
}
@@ -867,6 +863,8 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
rc = prep_cca_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
if (rc)
goto out;
+ print_hex_dump_debug("ccareq: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg.msg, ap_msg.len, false);
tdom = *domain;
if (perms != &ap_perms && tdom < AP_DOMAINS) {
@@ -942,8 +940,8 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
- __func__, xcrb->user_defined, *domain);
+ pr_debug("%s no match for address %02x.%04x => ENODEV\n",
+ __func__, xcrb->user_defined, *domain);
rc = -ENODEV;
goto out;
}
@@ -954,6 +952,10 @@ static long _zcrypt_send_cprb(bool userspace, struct ap_perms *perms,
*domain = AP_QID_QUEUE(qid);
rc = pref_zq->ops->send_cprb(userspace, pref_zq, xcrb, &ap_msg);
+ if (!rc) {
+ print_hex_dump_debug("ccarpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg.msg, ap_msg.len, false);
+ }
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
@@ -972,7 +974,26 @@ out:
long zcrypt_send_cprb(struct ica_xcRB *xcrb)
{
- return _zcrypt_send_cprb(false, &ap_perms, NULL, xcrb);
+ struct zcrypt_track tr;
+ int rc;
+
+ memset(&tr, 0, sizeof(tr));
+
+ do {
+ rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
+ do {
+ rc = _zcrypt_send_cprb(false, &ap_perms, &tr, xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc)
+ pr_debug("%s rc=%d\n", __func__, rc);
+
+ return rc;
}
EXPORT_SYMBOL(zcrypt_send_cprb);
@@ -1047,6 +1068,8 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
rc = prep_ep11_ap_msg(userspace, xcrb, &ap_msg, &func_code, &domain);
if (rc)
goto out_free;
+ print_hex_dump_debug("ep11req: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg.msg, ap_msg.len, false);
if (perms != &ap_perms && domain < AUTOSEL_DOM) {
if (ap_msg.flags & AP_MSG_FLAG_ADMIN) {
@@ -1115,15 +1138,15 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
if (!pref_zq) {
if (targets && target_num == 1) {
- ZCRYPT_DBF_DBG("%s no match for address %02x.%04x => ENODEV\n",
- __func__, (int)targets->ap_id,
- (int)targets->dom_id);
+ pr_debug("%s no match for address %02x.%04x => ENODEV\n",
+ __func__, (int)targets->ap_id,
+ (int)targets->dom_id);
} else if (targets) {
- ZCRYPT_DBF_DBG("%s no match for %d target addrs => ENODEV\n",
- __func__, (int)target_num);
+ pr_debug("%s no match for %d target addrs => ENODEV\n",
+ __func__, (int)target_num);
} else {
- ZCRYPT_DBF_DBG("%s no match for address ff.ffff => ENODEV\n",
- __func__);
+ pr_debug("%s no match for address ff.ffff => ENODEV\n",
+ __func__);
}
rc = -ENODEV;
goto out_free;
@@ -1131,6 +1154,10 @@ static long _zcrypt_send_ep11_cprb(bool userspace, struct ap_perms *perms,
qid = pref_zq->queue->qid;
rc = pref_zq->ops->send_ep11_cprb(userspace, pref_zq, xcrb, &ap_msg);
+ if (!rc) {
+ print_hex_dump_debug("ep11rpl: ", DUMP_PREFIX_ADDRESS, 16, 1,
+ ap_msg.msg, ap_msg.len, false);
+ }
spin_lock(&zcrypt_list_lock);
zcrypt_drop_queue(pref_zc, pref_zq, mod, wgt);
@@ -1151,7 +1178,26 @@ out:
long zcrypt_send_ep11_cprb(struct ep11_urb *xcrb)
{
- return _zcrypt_send_ep11_cprb(false, &ap_perms, NULL, xcrb);
+ struct zcrypt_track tr;
+ int rc;
+
+ memset(&tr, 0, sizeof(tr));
+
+ do {
+ rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
+ do {
+ rc = _zcrypt_send_ep11_cprb(false, &ap_perms, &tr, xcrb);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+ if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
+ rc = -EIO;
+ if (rc)
+ pr_debug("%s rc=%d\n", __func__, rc);
+
+ return rc;
}
EXPORT_SYMBOL(zcrypt_send_ep11_cprb);
@@ -1201,8 +1247,7 @@ static long zcrypt_rng(char *buffer)
spin_unlock(&zcrypt_list_lock);
if (!pref_zq) {
- ZCRYPT_DBF_DBG("%s no matching queue found => ENODEV\n",
- __func__);
+ pr_debug("%s no matching queue found => ENODEV\n", __func__);
rc = -ENODEV;
goto out;
}
@@ -1433,20 +1478,17 @@ static int icarsamodexpo_ioctl(struct ap_perms *perms, unsigned long arg)
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc) {
- ZCRYPT_DBF_DBG("ioctl ICARSAMODEXPO rc=%d\n", rc);
+ pr_debug("ioctl ICARSAMODEXPO rc=%d\n", rc);
return rc;
}
return put_user(mex.outputdatalength, &umex->outputdatalength);
@@ -1465,20 +1507,17 @@ static int icarsacrt_ioctl(struct ap_perms *perms, unsigned long arg)
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc) {
- ZCRYPT_DBF_DBG("ioctl ICARSACRT rc=%d\n", rc);
+ pr_debug("ioctl ICARSACRT rc=%d\n", rc);
return rc;
}
return put_user(crt.outputdatalength, &ucrt->outputdatalength);
@@ -1497,21 +1536,18 @@ static int zsecsendcprb_ioctl(struct ap_perms *perms, unsigned long arg)
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
- ZCRYPT_DBF_DBG("ioctl ZSENDCPRB rc=%d status=0x%x\n",
- rc, xcrb.status);
+ pr_debug("ioctl ZSENDCPRB rc=%d status=0x%x\n",
+ rc, xcrb.status);
if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
return -EFAULT;
return rc;
@@ -1530,20 +1566,17 @@ static int zsendep11cprb_ioctl(struct ap_perms *perms, unsigned long arg)
do {
rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = _zcrypt_send_ep11_cprb(true, perms, &tr, &xcrb);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
- ZCRYPT_DBF_DBG("ioctl ZSENDEP11CPRB rc=%d\n", rc);
+ pr_debug("ioctl ZSENDEP11CPRB rc=%d\n", rc);
if (copy_to_user(uxcrb, &xcrb, sizeof(xcrb)))
return -EFAULT;
return rc;
@@ -1672,7 +1705,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
}
/* unknown ioctl number */
default:
- ZCRYPT_DBF_DBG("unknown ioctl 0x%08x\n", cmd);
+ pr_debug("unknown ioctl 0x%08x\n", cmd);
return -ENOIOCTLCMD;
}
}
@@ -1710,16 +1743,13 @@ static long trans_modexpo32(struct ap_perms *perms, struct file *filp,
mex64.n_modulus = compat_ptr(mex32.n_modulus);
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = zcrypt_rsa_modexpo(perms, &tr, &mex64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
@@ -1763,16 +1793,13 @@ static long trans_modexpo_crt32(struct ap_perms *perms, struct file *filp,
crt64.u_mult_inv = compat_ptr(crt32.u_mult_inv);
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = zcrypt_rsa_crt(perms, &tr, &crt64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
if (rc)
@@ -1835,16 +1862,13 @@ static long trans_xcrb32(struct ap_perms *perms, struct file *filp,
xcrb64.status = xcrb32.status;
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
+
+ /* on ENODEV failure: retry once again after a requested rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
do {
rc = _zcrypt_send_cprb(true, perms, &tr, &xcrb64);
- if (rc == -EAGAIN)
- tr.again_counter++;
- } while (rc == -EAGAIN && tr.again_counter < TRACK_AGAIN_MAX);
+ } while (rc == -EAGAIN && ++tr.again_counter < TRACK_AGAIN_MAX);
if (rc == -EAGAIN && tr.again_counter >= TRACK_AGAIN_MAX)
rc = -EIO;
xcrb32.reply_control_blk_length = xcrb64.reply_control_blk_length;
@@ -1916,8 +1940,8 @@ static int zcrypt_rng_data_read(struct hwrng *rng, u32 *data)
*/
if (zcrypt_rng_buffer_index == 0) {
rc = zcrypt_rng((char *)zcrypt_rng_buffer);
- /* on failure: retry once again after a requested rescan */
- if ((rc == -ENODEV) && (zcrypt_process_rescan()))
+ /* on ENODEV failure: retry once again after an AP bus rescan */
+ if (rc == -ENODEV && zcrypt_process_rescan())
rc = zcrypt_rng((char *)zcrypt_rng_buffer);
if (rc < 0)
return -EIO;
@@ -1979,7 +2003,7 @@ void zcrypt_rng_device_remove(void)
* an asynchronous job. This function waits until these initial jobs
* are done and so the zcrypt api should be ready to serve crypto
* requests - if there are resources available. The function uses an
- * internal timeout of 60s. The very first caller will either wait for
+ * internal timeout of 30s. The very first caller will either wait for
* ap bus bindings complete or the timeout happens. This state will be
* remembered for further callers which will only be blocked until a
* decision is made (timeout or bindings complete).
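In practice a consumer only needs one call before its first request; a minimal sketch of a hypothetical caller (the function name and surrounding code are illustrative, only zcrypt_wait_api_operational() and zcrypt_send_cprb() are real):

	static long example_send_first_cprb(struct ica_xcRB *xcrb)
	{
		int rc;

		/* blocks until the AP bus bindings are complete or the
		 * internal timeout expires; the outcome is cached for
		 * all later callers */
		rc = zcrypt_wait_api_operational();
		if (rc)
			pr_debug("zcrypt api not operational, rc=%d\n", rc);

		return zcrypt_send_cprb(xcrb);
	}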
@@ -1998,8 +2022,8 @@ int zcrypt_wait_api_operational(void)
switch (zcrypt_wait_api_state) {
case 0:
/* initial state, invoke wait for the ap bus complete */
- rc = ap_wait_init_apqn_bindings_complete(
- msecs_to_jiffies(60 * 1000));
+ rc = ap_wait_apqn_bindings_complete(
+ msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS));
switch (rc) {
case 0:
/* ap bus bindings are complete */
@@ -2016,8 +2040,8 @@ int zcrypt_wait_api_operational(void)
break;
default:
/* other failure */
- ZCRYPT_DBF_DBG("%s ap_wait_init_apqn_bindings_complete()=%d\n",
- __func__, rc);
+ pr_debug("%s ap_wait_init_apqn_bindings_complete()=%d\n",
+ __func__, rc);
break;
}
break;
@@ -2040,7 +2064,7 @@ EXPORT_SYMBOL(zcrypt_wait_api_operational);
int __init zcrypt_debug_init(void)
{
zcrypt_dbf_info = debug_register("zcrypt", 2, 1,
- DBF_MAX_SPRINTF_ARGS * sizeof(long));
+ ZCRYPT_DBF_MAX_SPRINTF_ARGS * sizeof(long));
debug_register_view(zcrypt_dbf_info, &debug_sprintf_view);
debug_set_level(zcrypt_dbf_info, DBF_ERR);
@@ -2057,12 +2081,9 @@ static int __init zcdn_init(void)
int rc;
/* create a new class 'zcrypt' */
- zcrypt_class = class_create(ZCRYPT_NAME);
- if (IS_ERR(zcrypt_class)) {
- rc = PTR_ERR(zcrypt_class);
- goto out_class_create_failed;
- }
- zcrypt_class->dev_release = zcdn_device_release;
+ rc = class_register(&zcrypt_class);
+ if (rc)
+ goto out_class_register_failed;
/* alloc device minor range */
rc = alloc_chrdev_region(&zcrypt_devt,
@@ -2078,35 +2099,35 @@ static int __init zcdn_init(void)
goto out_cdev_add_failed;
/* need some class specific sysfs attributes */
- rc = class_create_file(zcrypt_class, &class_attr_zcdn_create);
+ rc = class_create_file(&zcrypt_class, &class_attr_zcdn_create);
if (rc)
goto out_class_create_file_1_failed;
- rc = class_create_file(zcrypt_class, &class_attr_zcdn_destroy);
+ rc = class_create_file(&zcrypt_class, &class_attr_zcdn_destroy);
if (rc)
goto out_class_create_file_2_failed;
return 0;
out_class_create_file_2_failed:
- class_remove_file(zcrypt_class, &class_attr_zcdn_create);
+ class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
out_class_create_file_1_failed:
cdev_del(&zcrypt_cdev);
out_cdev_add_failed:
unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
out_alloc_chrdev_failed:
- class_destroy(zcrypt_class);
-out_class_create_failed:
+ class_unregister(&zcrypt_class);
+out_class_register_failed:
return rc;
}
static void zcdn_exit(void)
{
- class_remove_file(zcrypt_class, &class_attr_zcdn_create);
- class_remove_file(zcrypt_class, &class_attr_zcdn_destroy);
+ class_remove_file(&zcrypt_class, &class_attr_zcdn_create);
+ class_remove_file(&zcrypt_class, &class_attr_zcdn_destroy);
zcdn_destroy_all();
cdev_del(&zcrypt_cdev);
unregister_chrdev_region(zcrypt_devt, ZCRYPT_MAX_MINOR_NODES);
- class_destroy(zcrypt_class);
+ class_unregister(&zcrypt_class);
}
/*
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index de659954c8..4ed481df57 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -38,6 +38,15 @@
*/
#define ZCRYPT_RNG_BUFFER_SIZE 4096
+/**
+ * The zcrypt_wait_api_operational() function waits this
+ * amount in milliseconds for ap_wait_apqn_bindings_complete().
+ * Also on a cprb send failure with ENODEV the send functions
+ * trigger an ap bus rescan and wait this time in milliseconds
+ * for ap_wait_apqn_bindings_complete() before resending.
+ */
+#define ZCRYPT_WAIT_BINDINGS_COMPLETE_MS 30000
+
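For reference, zcrypt_api.c consumes the constant roughly like this (see the zcrypt_wait_api_operational() hunk above):

	rc = ap_wait_apqn_bindings_complete(
			msecs_to_jiffies(ZCRYPT_WAIT_BINDINGS_COMPLETE_MS));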
/*
* Identifier for Crypto Request Performance Index
*/
diff --git a/drivers/s390/crypto/zcrypt_ccamisc.c b/drivers/s390/crypto/zcrypt_ccamisc.c
index 263fe18264..6087547328 100644
--- a/drivers/s390/crypto/zcrypt_ccamisc.c
+++ b/drivers/s390/crypto/zcrypt_ccamisc.c
@@ -23,11 +23,6 @@
#include "zcrypt_msgtype6.h"
#include "zcrypt_ccamisc.h"
-#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
-#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
-#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
-#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
-
/* Size of parameter block used for all cca requests/replies */
#define PARMBSIZE 512
@@ -367,8 +362,8 @@ int cca_genseckey(u16 cardnr, u16 domain,
memcpy(preqparm->lv1.key_length, "KEYLN32 ", 8);
break;
default:
- DEBUG_ERR("%s unknown/unsupported keybitsize %d\n",
- __func__, keybitsize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
rc = -EINVAL;
goto out;
}
@@ -386,15 +381,15 @@ int cca_genseckey(u16 cardnr, u16 domain,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, errno %d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR("%s secure key generate failure, card response %d/%d\n",
- __func__,
+ ZCRYPT_DBF_ERR("%s secure key generate failure, card response %d/%d\n",
+ __func__,
(int)prepcblk->ccp_rtcode,
(int)prepcblk->ccp_rscode);
rc = -EIO;
@@ -411,8 +406,8 @@ int cca_genseckey(u16 cardnr, u16 domain,
- sizeof(prepparm->lv3.keyblock.toklen)
- sizeof(prepparm->lv3.keyblock.tokattr);
if (seckeysize != SECKEYBLOBSIZE) {
- DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n",
- __func__, seckeysize, SECKEYBLOBSIZE);
+ ZCRYPT_DBF_ERR("%s secure token size mismatch %d != %d bytes\n",
+ __func__, seckeysize, SECKEYBLOBSIZE);
rc = -EIO;
goto out;
}
@@ -505,8 +500,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
keysize = 32;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keybitsize %d\n",
- __func__, keybitsize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
rc = -EINVAL;
goto out;
}
@@ -524,17 +519,17 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR("%s clear key import failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s clear key import failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
@@ -549,8 +544,8 @@ int cca_clr2seckey(u16 cardnr, u16 domain, u32 keybitsize,
- sizeof(prepparm->lv3.keyblock.toklen)
- sizeof(prepparm->lv3.keyblock.tokattr);
if (seckeysize != SECKEYBLOBSIZE) {
- DEBUG_ERR("%s secure token size mismatch %d != %d bytes\n",
- __func__, seckeysize, SECKEYBLOBSIZE);
+ ZCRYPT_DBF_ERR("%s secure token size mismatch %d != %d bytes\n",
+ __func__, seckeysize, SECKEYBLOBSIZE);
rc = -EIO;
goto out;
}
@@ -651,28 +646,28 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
- rc = -EAGAIN;
+ rc = -EBUSY;
else
rc = -EIO;
goto out;
}
if (prepcblk->ccp_rscode != 0) {
- DEBUG_WARN("%s unwrap secure key warning, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
}
/* process response cprb param block */
@@ -683,8 +678,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
/* check the returned keyblock */
if (prepparm->lv3.ckb.version != 0x01 &&
prepparm->lv3.ckb.version != 0x02) {
- DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
- __func__, (int)prepparm->lv3.ckb.version);
+ ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+ __func__, (int)prepparm->lv3.ckb.version);
rc = -EIO;
goto out;
}
@@ -707,8 +702,8 @@ int cca_sec2protkey(u16 cardnr, u16 domain,
*protkeytype = PKEY_KEYTYPE_AES_256;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keylen %d\n",
- __func__, prepparm->lv3.ckb.len);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keylen %d\n",
+ __func__, prepparm->lv3.ckb.len);
rc = -EIO;
goto out;
}
@@ -840,9 +835,8 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
case 256:
break;
default:
- DEBUG_ERR(
- "%s unknown/unsupported keybitsize %d\n",
- __func__, keybitsize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
rc = -EINVAL;
goto out;
}
@@ -880,19 +874,17 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s cipher key generate failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s cipher key generate failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
@@ -905,8 +897,8 @@ int cca_gencipherkey(u16 cardnr, u16 domain, u32 keybitsize, u32 keygenflags,
/* do some plausibility checks on the key block */
if (prepparm->kb.len < 120 + 5 * sizeof(uint16_t) ||
prepparm->kb.len > 136 + 5 * sizeof(uint16_t)) {
- DEBUG_ERR("%s reply with invalid or unknown key block\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s reply with invalid or unknown key block\n",
+ __func__);
rc = -EIO;
goto out;
}
@@ -1048,19 +1040,17 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s CSNBKPI2 failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s CSNBKPI2 failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
@@ -1073,8 +1063,8 @@ static int _ip_cprb_helper(u16 cardnr, u16 domain,
/* do some plausibility checks on the key block */
if (prepparm->kb.len < 120 + 3 * sizeof(uint16_t) ||
prepparm->kb.len > 136 + 3 * sizeof(uint16_t)) {
- DEBUG_ERR("%s reply with invalid or unknown key block\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s reply with invalid or unknown key block\n",
+ __func__);
rc = -EIO;
goto out;
}
@@ -1132,33 +1122,29 @@ int cca_clr2cipherkey(u16 card, u16 dom, u32 keybitsize, u32 keygenflags,
rc = _ip_cprb_helper(card, dom, "AES ", "FIRST ", "MIN3PART",
exorbuf, keybitsize, token, &tokensize);
if (rc) {
- DEBUG_ERR(
- "%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s clear key import 1/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
goto out;
}
rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
clrkey, keybitsize, token, &tokensize);
if (rc) {
- DEBUG_ERR(
- "%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s clear key import 2/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
goto out;
}
rc = _ip_cprb_helper(card, dom, "AES ", "ADD-PART", NULL,
exorbuf, keybitsize, token, &tokensize);
if (rc) {
- DEBUG_ERR(
- "%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s clear key import 3/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
goto out;
}
rc = _ip_cprb_helper(card, dom, "AES ", "COMPLETE", NULL,
NULL, keybitsize, token, &tokensize);
if (rc) {
- DEBUG_ERR(
- "%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s clear key import 4/4 with CSNBKPI2 failed, rc=%d\n",
+ __func__, rc);
goto out;
}
@@ -1265,31 +1251,28 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
- rc = -EAGAIN;
+ rc = -EBUSY;
else
rc = -EIO;
goto out;
}
if (prepcblk->ccp_rscode != 0) {
- DEBUG_WARN(
- "%s unwrap secure key warning, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
}
/* process response cprb param block */
@@ -1300,15 +1283,14 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
/* check the returned keyblock */
if (prepparm->vud.ckb.version != 0x01 &&
prepparm->vud.ckb.version != 0x02) {
- DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x\n",
- __func__, (int)prepparm->vud.ckb.version);
+ ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x\n",
+ __func__, (int)prepparm->vud.ckb.version);
rc = -EIO;
goto out;
}
if (prepparm->vud.ckb.algo != 0x02) {
- DEBUG_ERR(
- "%s reply param keyblock algo mismatch 0x%02x != 0x02\n",
- __func__, (int)prepparm->vud.ckb.algo);
+ ZCRYPT_DBF_ERR("%s reply param keyblock algo mismatch 0x%02x != 0x02\n",
+ __func__, (int)prepparm->vud.ckb.algo);
rc = -EIO;
goto out;
}
@@ -1331,8 +1313,8 @@ int cca_cipher2protkey(u16 cardnr, u16 domain, const u8 *ckey,
*protkeytype = PKEY_KEYTYPE_AES_256;
break;
default:
- DEBUG_ERR("%s unknown/unsupported keylen %d\n",
- __func__, prepparm->vud.ckb.keylen);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keylen %d\n",
+ __func__, prepparm->vud.ckb.keylen);
rc = -EIO;
goto out;
}
@@ -1432,31 +1414,28 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR(
- "%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
if (prepcblk->ccp_rtcode == 8 && prepcblk->ccp_rscode == 2290)
- rc = -EAGAIN;
+ rc = -EBUSY;
else
rc = -EIO;
goto out;
}
if (prepcblk->ccp_rscode != 0) {
- DEBUG_WARN(
- "%s unwrap secure key warning, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_WARN("%s unwrap secure key warning, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
}
/* process response cprb param block */
@@ -1466,23 +1445,22 @@ int cca_ecc2protkey(u16 cardnr, u16 domain, const u8 *key,
/* check the returned keyblock */
if (prepparm->vud.ckb.version != 0x02) {
- DEBUG_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n",
- __func__, (int)prepparm->vud.ckb.version);
+ ZCRYPT_DBF_ERR("%s reply param keyblock version mismatch 0x%02x != 0x02\n",
+ __func__, (int)prepparm->vud.ckb.version);
rc = -EIO;
goto out;
}
if (prepparm->vud.ckb.algo != 0x81) {
- DEBUG_ERR(
- "%s reply param keyblock algo mismatch 0x%02x != 0x81\n",
- __func__, (int)prepparm->vud.ckb.algo);
+ ZCRYPT_DBF_ERR("%s reply param keyblock algo mismatch 0x%02x != 0x81\n",
+ __func__, (int)prepparm->vud.ckb.algo);
rc = -EIO;
goto out;
}
/* copy the translated protected key */
if (prepparm->vud.ckb.keylen > *protkeylen) {
- DEBUG_ERR("%s prot keylen mismatch %d > buffersize %u\n",
- __func__, prepparm->vud.ckb.keylen, *protkeylen);
+ ZCRYPT_DBF_ERR("%s prot keylen mismatch %d > buffersize %u\n",
+ __func__, prepparm->vud.ckb.keylen, *protkeylen);
rc = -EIO;
goto out;
}
@@ -1550,17 +1528,17 @@ int cca_query_crypto_facility(u16 cardnr, u16 domain,
/* forward xcrb with request CPRB and reply CPRB to zcrypt dd */
rc = zcrypt_send_cprb(&xcrb);
if (rc) {
- DEBUG_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_cprb (cardnr=%d domain=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
/* check response returncode and reasoncode */
if (prepcblk->ccp_rtcode != 0) {
- DEBUG_ERR("%s unwrap secure key failure, card response %d/%d\n",
- __func__,
- (int)prepcblk->ccp_rtcode,
- (int)prepcblk->ccp_rscode);
+ ZCRYPT_DBF_ERR("%s unwrap secure key failure, card response %d/%d\n",
+ __func__,
+ (int)prepcblk->ccp_rtcode,
+ (int)prepcblk->ccp_rscode);
rc = -EIO;
goto out;
}
diff --git a/drivers/s390/crypto/zcrypt_debug.h b/drivers/s390/crypto/zcrypt_debug.h
index 5cf88aabd6..9a208dc4c2 100644
--- a/drivers/s390/crypto/zcrypt_debug.h
+++ b/drivers/s390/crypto/zcrypt_debug.h
@@ -17,7 +17,7 @@
#define RC2ERR(rc) ((rc) ? DBF_ERR : DBF_INFO)
#define RC2WARN(rc) ((rc) ? DBF_WARN : DBF_INFO)
-#define DBF_MAX_SPRINTF_ARGS 6
+#define ZCRYPT_DBF_MAX_SPRINTF_ARGS 6
#define ZCRYPT_DBF(...) \
debug_sprintf_event(zcrypt_dbf_info, ##__VA_ARGS__)
@@ -27,8 +27,6 @@
debug_sprintf_event(zcrypt_dbf_info, DBF_WARN, ##__VA_ARGS__)
#define ZCRYPT_DBF_INFO(...) \
debug_sprintf_event(zcrypt_dbf_info, DBF_INFO, ##__VA_ARGS__)
-#define ZCRYPT_DBF_DBG(...) \
- debug_sprintf_event(zcrypt_dbf_info, DBF_DEBUG, ##__VA_ARGS__)
extern debug_info_t *zcrypt_dbf_info;
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index 0a877f9792..9bcf8fc69e 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -24,11 +24,6 @@
#include "zcrypt_ep11misc.h"
#include "zcrypt_ccamisc.h"
-#define DEBUG_DBG(...) ZCRYPT_DBF(DBF_DEBUG, ##__VA_ARGS__)
-#define DEBUG_INFO(...) ZCRYPT_DBF(DBF_INFO, ##__VA_ARGS__)
-#define DEBUG_WARN(...) ZCRYPT_DBF(DBF_WARN, ##__VA_ARGS__)
-#define DEBUG_ERR(...) ZCRYPT_DBF(DBF_ERR, ##__VA_ARGS__)
-
#define EP11_PINBLOB_V1_BYTES 56
/* default iv used here */
@@ -510,7 +505,7 @@ static int check_reply_pl(const u8 *pl, const char *func)
/* start tag */
if (*pl++ != 0x30) {
- DEBUG_ERR("%s reply start tag mismatch\n", func);
+ ZCRYPT_DBF_ERR("%s reply start tag mismatch\n", func);
return -EIO;
}
@@ -527,46 +522,63 @@ static int check_reply_pl(const u8 *pl, const char *func)
len = *((u16 *)pl);
pl += 2;
} else {
- DEBUG_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n",
- func, *pl);
+ ZCRYPT_DBF_ERR("%s reply start tag lenfmt mismatch 0x%02hhx\n",
+ func, *pl);
return -EIO;
}
/* len should cover at least 3 fields with 32 bit value each */
if (len < 3 * 6) {
- DEBUG_ERR("%s reply length %d too small\n", func, len);
+ ZCRYPT_DBF_ERR("%s reply length %d too small\n", func, len);
return -EIO;
}
/* function tag, length and value */
if (pl[0] != 0x04 || pl[1] != 0x04) {
- DEBUG_ERR("%s function tag or length mismatch\n", func);
+ ZCRYPT_DBF_ERR("%s function tag or length mismatch\n", func);
return -EIO;
}
pl += 6;
/* dom tag, length and value */
if (pl[0] != 0x04 || pl[1] != 0x04) {
- DEBUG_ERR("%s dom tag or length mismatch\n", func);
+ ZCRYPT_DBF_ERR("%s dom tag or length mismatch\n", func);
return -EIO;
}
pl += 6;
/* return value tag, length and value */
if (pl[0] != 0x04 || pl[1] != 0x04) {
- DEBUG_ERR("%s return value tag or length mismatch\n", func);
+ ZCRYPT_DBF_ERR("%s return value tag or length mismatch\n",
+ func);
return -EIO;
}
pl += 2;
ret = *((u32 *)pl);
if (ret != 0) {
- DEBUG_ERR("%s return value 0x%04x != 0\n", func, ret);
+ ZCRYPT_DBF_ERR("%s return value 0x%08x != 0\n", func, ret);
return -EIO;
}
return 0;
}
+/* Check ep11 reply cprb, return 0 or suggested errno value. */
+static int check_reply_cprb(const struct ep11_cprb *rep, const char *func)
+{
+ /* check ep11 reply return code field */
+ if (rep->ret_code) {
+ ZCRYPT_DBF_ERR("%s ep11 reply ret_code=0x%08x\n", __func__,
+ rep->ret_code);
+ if (rep->ret_code == 0x000c0003)
+ return -EBUSY;
+ else
+ return -EIO;
+ }
+
+ return 0;
+}
+
/*
* Helper function which does an ep11 query with given query type.
*/
@@ -626,23 +638,28 @@ static int ep11_query_info(u16 cardnr, u16 domain, u32 query_type,
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int)cardnr, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)cardnr, (int)domain, rc);
goto out;
}
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
- DEBUG_ERR("%s unknown reply data format\n", __func__);
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
if (rep_pl->data_len > buflen) {
- DEBUG_ERR("%s mismatch between reply data len and buffer len\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s mismatch between reply data len and buffer len\n",
+ __func__);
rc = -ENOSPC;
goto out;
}
@@ -816,9 +833,8 @@ static int _ep11_genaeskey(u16 card, u16 domain,
case 256:
break;
default:
- DEBUG_ERR(
- "%s unknown/unsupported keybitsize %d\n",
- __func__, keybitsize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
rc = -EINVAL;
goto out;
}
@@ -878,23 +894,28 @@ static int _ep11_genaeskey(u16 card, u16 domain,
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int)card, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)card, (int)domain, rc);
goto out;
}
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
- DEBUG_ERR("%s unknown reply data format\n", __func__);
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
if (rep_pl->data_len > *keybufsize) {
- DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s mismatch reply data len / key buffer len\n",
+ __func__);
rc = -ENOSPC;
goto out;
}
@@ -1030,17 +1051,22 @@ static int ep11_cryptsingle(u16 card, u16 domain,
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int)card, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)card, (int)domain, rc);
goto out;
}
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
if (rep_pl->data_tag != 0x04) {
- DEBUG_ERR("%s unknown reply data format\n", __func__);
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
@@ -1053,14 +1079,14 @@ static int ep11_cryptsingle(u16 card, u16 domain,
n = *((u16 *)p);
p += 2;
} else {
- DEBUG_ERR("%s unknown reply data length format 0x%02hhx\n",
- __func__, rep_pl->data_lenfmt);
+ ZCRYPT_DBF_ERR("%s unknown reply data length format 0x%02hhx\n",
+ __func__, rep_pl->data_lenfmt);
rc = -EIO;
goto out;
}
if (n > *outbufsize) {
- DEBUG_ERR("%s mismatch reply data len %d / output buffer %zu\n",
- __func__, n, *outbufsize);
+ ZCRYPT_DBF_ERR("%s mismatch reply data len %d / output buffer %zu\n",
+ __func__, n, *outbufsize);
rc = -ENOSPC;
goto out;
}
@@ -1188,23 +1214,28 @@ static int _ep11_unwrapkey(u16 card, u16 domain,
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int)card, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)card, (int)domain, rc);
goto out;
}
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
- DEBUG_ERR("%s unknown reply data format\n", __func__);
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
if (rep_pl->data_len > *keybufsize) {
- DEBUG_ERR("%s mismatch reply data len / key buffer len\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s mismatch reply data len / key buffer len\n",
+ __func__);
rc = -ENOSPC;
goto out;
}
@@ -1343,23 +1374,28 @@ static int _ep11_wrapkey(u16 card, u16 domain,
rc = zcrypt_send_ep11_cprb(urb);
if (rc) {
- DEBUG_ERR(
- "%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
- __func__, (int)card, (int)domain, rc);
+ ZCRYPT_DBF_ERR("%s zcrypt_send_ep11_cprb(card=%d dom=%d) failed, rc=%d\n",
+ __func__, (int)card, (int)domain, rc);
goto out;
}
+ /* check ep11 reply cprb */
+ rc = check_reply_cprb(rep, __func__);
+ if (rc)
+ goto out;
+
+ /* check payload */
rc = check_reply_pl((u8 *)rep_pl, __func__);
if (rc)
goto out;
if (rep_pl->data_tag != 0x04 || rep_pl->data_lenfmt != 0x82) {
- DEBUG_ERR("%s unknown reply data format\n", __func__);
+ ZCRYPT_DBF_ERR("%s unknown reply data format\n", __func__);
rc = -EIO;
goto out;
}
if (rep_pl->data_len > *datasize) {
- DEBUG_ERR("%s mismatch reply data len / data buffer len\n",
- __func__);
+ ZCRYPT_DBF_ERR("%s mismatch reply data len / data buffer len\n",
+ __func__);
rc = -ENOSPC;
goto out;
}
@@ -1386,9 +1422,8 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
if (keybitsize == 128 || keybitsize == 192 || keybitsize == 256) {
clrkeylen = keybitsize / 8;
} else {
- DEBUG_ERR(
- "%s unknown/unsupported keybitsize %d\n",
- __func__, keybitsize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported keybitsize %d\n",
+ __func__, keybitsize);
return -EINVAL;
}
@@ -1405,9 +1440,8 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
kek, &keklen);
if (rc) {
- DEBUG_ERR(
- "%s generate kek key failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s generate kek key failed, rc=%d\n",
+ __func__, rc);
goto out;
}
@@ -1415,9 +1449,8 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
rc = ep11_cryptsingle(card, domain, 0, 0, def_iv, kek, keklen,
clrkey, clrkeylen, encbuf, &encbuflen);
if (rc) {
- DEBUG_ERR(
- "%s encrypting key value with kek key failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s encrypting key value with kek key failed, rc=%d\n",
+ __func__, rc);
goto out;
}
@@ -1426,9 +1459,8 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
encbuf, encbuflen, 0, def_iv,
keybitsize, 0, keybuf, keybufsize, keytype);
if (rc) {
- DEBUG_ERR(
- "%s importing key value as new key failed,, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s importing key value as new key failed,, rc=%d\n",
+ __func__, rc);
goto out;
}
@@ -1476,17 +1508,16 @@ int ep11_kblob2protkey(u16 card, u16 dom,
rc = _ep11_wrapkey(card, dom, (u8 *)key, keylen,
0, def_iv, wkbuf, &wkbuflen);
if (rc) {
- DEBUG_ERR(
- "%s rewrapping ep11 key to pkey failed, rc=%d\n",
- __func__, rc);
+ ZCRYPT_DBF_ERR("%s rewrapping ep11 key to pkey failed, rc=%d\n",
+ __func__, rc);
goto out;
}
wki = (struct wk_info *)wkbuf;
/* check struct version and pkey type */
if (wki->version != 1 || wki->pkeytype < 1 || wki->pkeytype > 5) {
- DEBUG_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
- __func__, (int)wki->version, (int)wki->pkeytype);
+ ZCRYPT_DBF_ERR("%s wk info version %d or pkeytype %d mismatch.\n",
+ __func__, (int)wki->version, (int)wki->pkeytype);
rc = -EIO;
goto out;
}
@@ -1511,8 +1542,8 @@ int ep11_kblob2protkey(u16 card, u16 dom,
*protkeytype = PKEY_KEYTYPE_AES_256;
break;
default:
- DEBUG_ERR("%s unknown/unsupported AES pkeysize %d\n",
- __func__, (int)wki->pkeysize);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported AES pkeysize %d\n",
+ __func__, (int)wki->pkeysize);
rc = -EIO;
goto out;
}
@@ -1525,16 +1556,16 @@ int ep11_kblob2protkey(u16 card, u16 dom,
break;
case 2: /* TDES */
default:
- DEBUG_ERR("%s unknown/unsupported key type %d\n",
- __func__, (int)wki->pkeytype);
+ ZCRYPT_DBF_ERR("%s unknown/unsupported key type %d\n",
+ __func__, (int)wki->pkeytype);
rc = -EIO;
goto out;
}
/* copy the translated protected key */
if (wki->pkeysize > *protkeylen) {
- DEBUG_ERR("%s wk info pkeysize %llu > protkeysize %u\n",
- __func__, wki->pkeysize, *protkeylen);
+ ZCRYPT_DBF_ERR("%s wk info pkeysize %llu > protkeysize %u\n",
+ __func__, wki->pkeysize, *protkeylen);
rc = -EINVAL;
goto out;
}
diff --git a/drivers/s390/crypto/zcrypt_error.h b/drivers/s390/crypto/zcrypt_error.h
index a44fcfcec9..46e27b43a8 100644
--- a/drivers/s390/crypto/zcrypt_error.h
+++ b/drivers/s390/crypto/zcrypt_error.h
@@ -119,10 +119,9 @@ static inline int convert_error(struct zcrypt_queue *zq,
case REP82_ERROR_MESSAGE_TYPE: /* 0x20 */
case REP82_ERROR_TRANSPORT_FAIL: /* 0x90 */
/*
- * Msg to wrong type or card/infrastructure failure.
- * Trigger rescan of the ap bus, trigger retry request.
+ * Msg to wrong type or card/infrastructure failure. Return
+ * EAGAIN, the upper layer may do a retry on the request.
*/
- atomic_set(&zcrypt_rescan_req, 1);
/* For type 86 response show the apfs value (failure reason) */
if (ehdr->reply_code == REP82_ERROR_TRANSPORT_FAIL &&
ehdr->type == TYPE86_RSP_CODE) {
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 2e155de8ab..3b39cb8f92 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -427,7 +427,7 @@ static void zcrypt_msgtype50_receive(struct ap_queue *aq,
len = t80h->len;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
+ pr_debug("%s len mismatch => EMSGSIZE\n", __func__);
msg->rc = -EMSGSIZE;
goto out;
}
@@ -487,9 +487,9 @@ static long zcrypt_msgtype50_modexpo(struct zcrypt_queue *zq,
out:
ap_msg->private = NULL;
if (rc)
- ZCRYPT_DBF_DBG("%s send me cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), rc);
+ pr_debug("%s send me cprb at dev=%02x.%04x rc=%d\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
@@ -537,9 +537,9 @@ static long zcrypt_msgtype50_modexpo_crt(struct zcrypt_queue *zq,
out:
ap_msg->private = NULL;
if (rc)
- ZCRYPT_DBF_DBG("%s send crt cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), rc);
+ pr_debug("%s send crt cprb at dev=%02x.%04x rc=%d\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 3c53abbdc3..215f257d23 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -437,9 +437,9 @@ static int xcrb_msg_to_type6cprb_msgx(bool userspace, struct ap_message *ap_msg,
ap_msg->flags |= AP_MSG_FLAG_ADMIN;
break;
default:
- ZCRYPT_DBF_DBG("%s unknown CPRB minor version '%c%c'\n",
- __func__, msg->cprbx.func_id[0],
- msg->cprbx.func_id[1]);
+ pr_debug("%s unknown CPRB minor version '%c%c'\n",
+ __func__, msg->cprbx.func_id[0],
+ msg->cprbx.func_id[1]);
}
/* copy data block */
@@ -629,9 +629,9 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
/* Copy CPRB to user */
if (xcrb->reply_control_blk_length < msg->fmt2.count1) {
- ZCRYPT_DBF_DBG("%s reply_control_blk_length %u < required %u => EMSGSIZE\n",
- __func__, xcrb->reply_control_blk_length,
- msg->fmt2.count1);
+ pr_debug("%s reply_control_blk_length %u < required %u => EMSGSIZE\n",
+ __func__, xcrb->reply_control_blk_length,
+ msg->fmt2.count1);
return -EMSGSIZE;
}
if (z_copy_to_user(userspace, xcrb->reply_control_blk_addr,
@@ -642,9 +642,9 @@ static int convert_type86_xcrb(bool userspace, struct zcrypt_queue *zq,
/* Copy data buffer to user */
if (msg->fmt2.count2) {
if (xcrb->reply_data_length < msg->fmt2.count2) {
- ZCRYPT_DBF_DBG("%s reply_data_length %u < required %u => EMSGSIZE\n",
- __func__, xcrb->reply_data_length,
- msg->fmt2.count2);
+ pr_debug("%s reply_data_length %u < required %u => EMSGSIZE\n",
+ __func__, xcrb->reply_data_length,
+ msg->fmt2.count2);
return -EMSGSIZE;
}
if (z_copy_to_user(userspace, xcrb->reply_data_addr,
@@ -673,9 +673,9 @@ static int convert_type86_ep11_xcrb(bool userspace, struct zcrypt_queue *zq,
char *data = reply->msg;
if (xcrb->resp_len < msg->fmt2.count1) {
- ZCRYPT_DBF_DBG("%s resp_len %u < required %u => EMSGSIZE\n",
- __func__, (unsigned int)xcrb->resp_len,
- msg->fmt2.count1);
+ pr_debug("%s resp_len %u < required %u => EMSGSIZE\n",
+ __func__, (unsigned int)xcrb->resp_len,
+ msg->fmt2.count1);
return -EMSGSIZE;
}
@@ -875,7 +875,8 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
len = sizeof(struct type86x_reply) + t86r->length;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
+ pr_debug("%s len mismatch => EMSGSIZE\n",
+ __func__);
msg->rc = -EMSGSIZE;
goto out;
}
@@ -889,7 +890,8 @@ static void zcrypt_msgtype6_receive(struct ap_queue *aq,
len = t86r->fmt2.offset1 + t86r->fmt2.count1;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
+ pr_debug("%s len mismatch => EMSGSIZE\n",
+ __func__);
msg->rc = -EMSGSIZE;
goto out;
}
@@ -939,7 +941,8 @@ static void zcrypt_msgtype6_receive_ep11(struct ap_queue *aq,
len = t86r->fmt2.offset1 + t86r->fmt2.count1;
if (len > reply->bufsize || len > msg->bufsize ||
len != reply->len) {
- ZCRYPT_DBF_DBG("%s len mismatch => EMSGSIZE\n", __func__);
+ pr_debug("%s len mismatch => EMSGSIZE\n",
+ __func__);
msg->rc = -EMSGSIZE;
goto out;
}
@@ -1151,9 +1154,9 @@ static long zcrypt_msgtype6_send_cprb(bool userspace, struct zcrypt_queue *zq,
out:
if (rc)
- ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), rc);
+ pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
@@ -1274,9 +1277,9 @@ static long zcrypt_msgtype6_send_ep11_cprb(bool userspace, struct zcrypt_queue *
out:
if (rc)
- ZCRYPT_DBF_DBG("%s send cprb at dev=%02x.%04x rc=%d\n",
- __func__, AP_QID_CARD(zq->queue->qid),
- AP_QID_QUEUE(zq->queue->qid), rc);
+ pr_debug("%s send cprb at dev=%02x.%04x rc=%d\n",
+ __func__, AP_QID_CARD(zq->queue->qid),
+ AP_QID_QUEUE(zq->queue->qid), rc);
return rc;
}
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 90ec477386..9678c6a2cd 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -1325,7 +1325,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
clear_normalized_cda(&ch->ccw[1]);
CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
- (void *)(unsigned long)ch->ccw[1].cda,
+ (void *)(u64)dma32_to_u32(ch->ccw[1].cda),
ch->trans_skb->data);
ch->ccw[1].count = ch->max_bufsize;
@@ -1340,7 +1340,7 @@ static void ctcmpc_chx_txdone(fsm_instance *fi, int event, void *arg)
}
CTCM_PR_DBGDATA("ccwcda=0x%p data=0x%p\n",
- (void *)(unsigned long)ch->ccw[1].cda,
+ (void *)(u64)dma32_to_u32(ch->ccw[1].cda),
ch->trans_skb->data);
ch->ccw[1].count = ch->trans_skb->len;
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index ac15d7c2b2..878fe3ce53 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -1389,7 +1389,7 @@ static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
ch->ccw[15].cmd_code = CCW_CMD_WRITE;
ch->ccw[15].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[15].count = TH_HEADER_LENGTH;
- ch->ccw[15].cda = virt_to_phys(ch->discontact_th);
+ ch->ccw[15].cda = virt_to_dma32(ch->discontact_th);
ch->ccw[16].cmd_code = CCW_CMD_NOOP;
ch->ccw[16].flags = CCW_FLAG_SLI;
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 7a2f34a5e0..9e580ef69b 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -1708,57 +1708,57 @@ static void mpc_action_side_xid(fsm_instance *fsm, void *arg, int side)
ch->ccw[9].cmd_code = CCW_CMD_WRITE;
ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[9].count = TH_HEADER_LENGTH;
- ch->ccw[9].cda = virt_to_phys(ch->xid_th);
+ ch->ccw[9].cda = virt_to_dma32(ch->xid_th);
if (ch->xid == NULL)
goto done;
ch->ccw[10].cmd_code = CCW_CMD_WRITE;
ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[10].count = XID2_LENGTH;
- ch->ccw[10].cda = virt_to_phys(ch->xid);
+ ch->ccw[10].cda = virt_to_dma32(ch->xid);
ch->ccw[11].cmd_code = CCW_CMD_READ;
ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[11].count = TH_HEADER_LENGTH;
- ch->ccw[11].cda = virt_to_phys(ch->rcvd_xid_th);
+ ch->ccw[11].cda = virt_to_dma32(ch->rcvd_xid_th);
ch->ccw[12].cmd_code = CCW_CMD_READ;
ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[12].count = XID2_LENGTH;
- ch->ccw[12].cda = virt_to_phys(ch->rcvd_xid);
+ ch->ccw[12].cda = virt_to_dma32(ch->rcvd_xid);
ch->ccw[13].cmd_code = CCW_CMD_READ;
- ch->ccw[13].cda = virt_to_phys(ch->rcvd_xid_id);
+ ch->ccw[13].cda = virt_to_dma32(ch->rcvd_xid_id);
} else { /* side == YSIDE : mpc_action_yside_xid */
ch->ccw[9].cmd_code = CCW_CMD_READ;
ch->ccw[9].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[9].count = TH_HEADER_LENGTH;
- ch->ccw[9].cda = virt_to_phys(ch->rcvd_xid_th);
+ ch->ccw[9].cda = virt_to_dma32(ch->rcvd_xid_th);
ch->ccw[10].cmd_code = CCW_CMD_READ;
ch->ccw[10].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[10].count = XID2_LENGTH;
- ch->ccw[10].cda = virt_to_phys(ch->rcvd_xid);
+ ch->ccw[10].cda = virt_to_dma32(ch->rcvd_xid);
if (ch->xid_th == NULL)
goto done;
ch->ccw[11].cmd_code = CCW_CMD_WRITE;
ch->ccw[11].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[11].count = TH_HEADER_LENGTH;
- ch->ccw[11].cda = virt_to_phys(ch->xid_th);
+ ch->ccw[11].cda = virt_to_dma32(ch->xid_th);
if (ch->xid == NULL)
goto done;
ch->ccw[12].cmd_code = CCW_CMD_WRITE;
ch->ccw[12].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
ch->ccw[12].count = XID2_LENGTH;
- ch->ccw[12].cda = virt_to_phys(ch->xid);
+ ch->ccw[12].cda = virt_to_dma32(ch->xid);
if (ch->xid_id == NULL)
goto done;
ch->ccw[13].cmd_code = CCW_CMD_WRITE;
- ch->ccw[13].cda = virt_to_phys(ch->xid_id);
+ ch->ccw[13].cda = virt_to_dma32(ch->xid_id);
}
ch->ccw[13].flags = CCW_FLAG_SLI | CCW_FLAG_CC;
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index 0ff61d00fe..8672d225ba 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -9,7 +9,7 @@
#include <linux/slab.h>
#include <linux/timer.h>
-MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert (felfert@millenux.com)");
+MODULE_AUTHOR("(C) 2000 IBM Corp. by Fritz Elfert <felfert@millenux.com>");
MODULE_DESCRIPTION("Finite state machine helper functions");
MODULE_LICENSE("GPL");
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index a1f2acd6fb..25d4e63765 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -218,7 +218,7 @@ lcs_setup_read_ccws(struct lcs_card *card)
* we do not need to do set_normalized_cda.
*/
card->read.ccws[cnt].cda =
- (__u32)virt_to_phys(card->read.iob[cnt].data);
+ virt_to_dma32(card->read.iob[cnt].data);
((struct lcs_header *)
card->read.iob[cnt].data)->offset = LCS_ILLEGAL_OFFSET;
card->read.iob[cnt].callback = lcs_get_frames_cb;
@@ -230,8 +230,7 @@ lcs_setup_read_ccws(struct lcs_card *card)
card->read.ccws[LCS_NUM_BUFFS - 1].flags |= CCW_FLAG_SUSPEND;
/* Last ccw is a tic (transfer in channel). */
card->read.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
- card->read.ccws[LCS_NUM_BUFFS].cda =
- (__u32)virt_to_phys(card->read.ccws);
+ card->read.ccws[LCS_NUM_BUFFS].cda = virt_to_dma32(card->read.ccws);
	/* Set initial state of the read channel. */
card->read.state = LCS_CH_STATE_INIT;
@@ -273,12 +272,11 @@ lcs_setup_write_ccws(struct lcs_card *card)
* we do not need to do set_normalized_cda.
*/
card->write.ccws[cnt].cda =
- (__u32)virt_to_phys(card->write.iob[cnt].data);
+ virt_to_dma32(card->write.iob[cnt].data);
}
/* Last ccw is a tic (transfer in channel). */
card->write.ccws[LCS_NUM_BUFFS].cmd_code = LCS_CCW_TRANSFER;
- card->write.ccws[LCS_NUM_BUFFS].cda =
- (__u32)virt_to_phys(card->write.ccws);
+ card->write.ccws[LCS_NUM_BUFFS].cda = virt_to_dma32(card->write.ccws);
/* Set initial state of the write channel. */
card->read.state = LCS_CH_STATE_INIT;
@@ -1399,7 +1397,7 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
if ((channel->state != LCS_CH_STATE_INIT) &&
(irb->scsw.cmd.fctl & SCSW_FCTL_START_FUNC) &&
(irb->scsw.cmd.cpa != 0)) {
- index = (struct ccw1 *) __va((addr_t) irb->scsw.cmd.cpa)
+ index = (struct ccw1 *)dma32_to_virt(irb->scsw.cmd.cpa)
- channel->ccws;
if ((irb->scsw.cmd.actl & SCSW_ACTL_SUSPENDED) ||
(irb->scsw.cmd.cstat & SCHN_STAT_PCI))
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 613eab7297..41fe8a043d 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -956,7 +956,7 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb,
struct dst_entry *dst = skb_dst(skb);
struct rt6_info *rt;
- rt = (struct rt6_info *) dst;
+ rt = dst_rt6_info(dst);
if (dst) {
if (proto == htons(ETH_P_IPV6))
dst = dst_check(dst, rt6_get_cookie(rt));
@@ -970,15 +970,14 @@ static inline struct dst_entry *qeth_dst_check_rcu(struct sk_buff *skb,
static inline __be32 qeth_next_hop_v4_rcu(struct sk_buff *skb,
struct dst_entry *dst)
{
- struct rtable *rt = (struct rtable *) dst;
-
- return (rt) ? rt_nexthop(rt, ip_hdr(skb)->daddr) : ip_hdr(skb)->daddr;
+ return (dst) ? rt_nexthop(dst_rtable(dst), ip_hdr(skb)->daddr) :
+ ip_hdr(skb)->daddr;
}
static inline struct in6_addr *qeth_next_hop_v6_rcu(struct sk_buff *skb,
struct dst_entry *dst)
{
- struct rt6_info *rt = (struct rt6_info *) dst;
+ struct rt6_info *rt = dst_rt6_info(dst);
if (rt && !ipv6_addr_any(&rt->rt6i_gateway))
return &rt->rt6i_gateway;
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index dc5ae75d46..a3adaec550 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -429,7 +429,7 @@ static void qeth_setup_ccw(struct ccw1 *ccw, u8 cmd_code, u8 flags, u32 len,
ccw->cmd_code = cmd_code;
ccw->flags = flags | CCW_FLAG_SLI;
ccw->count = len;
- ccw->cda = (__u32)virt_to_phys(data);
+ ccw->cda = virt_to_dma32(data);
}
static int __qeth_issue_next_read(struct qeth_card *card)
@@ -1396,7 +1396,7 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
qeth_tx_complete_buf(queue, buf, error, budget);
for (i = 0; i < queue->max_elements; ++i) {
- void *data = phys_to_virt(buf->buffer->element[i].addr);
+ void *data = dma64_to_virt(buf->buffer->element[i].addr);
if (__test_and_clear_bit(i, buf->from_kmem_cache) && data)
kmem_cache_free(qeth_core_header_cache, data);
@@ -1441,7 +1441,7 @@ static void qeth_tx_complete_pending_bufs(struct qeth_card *card,
for (i = 0;
i < aob->sb_count && i < queue->max_elements;
i++) {
- void *data = phys_to_virt(aob->sba[i]);
+ void *data = dma64_to_virt(aob->sba[i]);
if (test_bit(i, buf->from_kmem_cache) && data)
kmem_cache_free(qeth_core_header_cache,
@@ -2958,8 +2958,8 @@ static int qeth_init_input_buffer(struct qeth_card *card,
*/
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
buf->buffer->element[i].length = PAGE_SIZE;
- buf->buffer->element[i].addr =
- page_to_phys(pool_entry->elements[i]);
+ buf->buffer->element[i].addr = u64_to_dma64(
+ page_to_phys(pool_entry->elements[i]));
if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
else
@@ -3792,9 +3792,9 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
buffer->element[e].addr) {
- unsigned long phys_aob_addr = buffer->element[e].addr;
+ dma64_t phys_aob_addr = buffer->element[e].addr;
- qeth_qdio_handle_aob(card, phys_to_virt(phys_aob_addr));
+ qeth_qdio_handle_aob(card, dma64_to_virt(phys_aob_addr));
++e;
}
qeth_scrub_qdio_buffer(buffer, QDIO_MAX_ELEMENTS_PER_BUFFER);
@@ -4069,7 +4069,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
if (hd_len) {
is_first_elem = false;
- buffer->element[element].addr = virt_to_phys(hdr);
+ buffer->element[element].addr = virt_to_dma64(hdr);
buffer->element[element].length = hd_len;
buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
@@ -4090,7 +4090,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
elem_length = min_t(unsigned int, length,
PAGE_SIZE - offset_in_page(data));
- buffer->element[element].addr = virt_to_phys(data);
+ buffer->element[element].addr = virt_to_dma64(data);
buffer->element[element].length = elem_length;
length -= elem_length;
if (is_first_elem) {
@@ -4120,7 +4120,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
elem_length = min_t(unsigned int, length,
PAGE_SIZE - offset_in_page(data));
- buffer->element[element].addr = virt_to_phys(data);
+ buffer->element[element].addr = virt_to_dma64(data);
buffer->element[element].length = elem_length;
buffer->element[element].eflags =
SBAL_EFLAGS_MIDDLE_FRAG;
@@ -5596,7 +5596,7 @@ next_packet:
offset = 0;
}
- hdr = phys_to_virt(element->addr) + offset;
+ hdr = dma64_to_virt(element->addr) + offset;
offset += sizeof(*hdr);
skb = NULL;
@@ -5688,7 +5688,7 @@ use_skb:
walk_packet:
while (skb_len) {
int data_len = min(skb_len, (int)(element->length - offset));
- char *data = phys_to_virt(element->addr) + offset;
+ char *data = dma64_to_virt(element->addr) + offset;
skb_len -= data_len;
offset += data_len;
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index ceed1b6f7c..22e8200033 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -2742,7 +2742,7 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
sbale = &sbal->element[idx];
- req_id = sbale->addr;
+ req_id = dma64_to_u64(sbale->addr);
fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
if (!fsf_req) {
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index f54f506b02..8cbc5e1711 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -125,7 +125,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
memset(pl, 0,
ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
sbale = qdio->res_q[idx]->element;
- req_id = sbale->addr;
+ req_id = dma64_to_u64(sbale->addr);
scount = min(sbale->scount + 1,
ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
/* incl. signaling SBAL */
@@ -256,7 +256,7 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
q_req->sbal_number);
return -EINVAL;
}
- sbale->addr = sg_phys(sg);
+ sbale->addr = u64_to_dma64(sg_phys(sg));
sbale->length = sg->length;
}
return 0;
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 90134d9b69..8f7d2ae944 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -129,14 +129,14 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
% QDIO_MAX_BUFFERS_PER_Q;
sbale = zfcp_qdio_sbale_req(qdio, q_req);
- sbale->addr = req_id;
+ sbale->addr = u64_to_dma64(req_id);
sbale->eflags = 0;
sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;
if (unlikely(!data))
return;
sbale++;
- sbale->addr = virt_to_phys(data);
+ sbale->addr = virt_to_dma64(data);
sbale->length = len;
}
@@ -159,7 +159,7 @@ void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
q_req->sbale_curr++;
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
- sbale->addr = virt_to_phys(data);
+ sbale->addr = virt_to_dma64(data);
sbale->length = len;
}
diff --git a/drivers/s390/virtio/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index ac67576301..d7569f3955 100644
--- a/drivers/s390/virtio/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
@@ -72,6 +72,7 @@ struct virtio_ccw_device {
unsigned int config_ready;
void *airq_info;
struct vcdev_dma_area *dma_area;
+ dma32_t dma_area_addr;
};
static inline unsigned long *indicators(struct virtio_ccw_device *vcdev)
@@ -84,20 +85,50 @@ static inline unsigned long *indicators2(struct virtio_ccw_device *vcdev)
return &vcdev->dma_area->indicators2;
}
+/* Spec stipulates a 64 bit address */
+static inline dma64_t indicators_dma(struct virtio_ccw_device *vcdev)
+{
+ u64 dma_area_addr = dma32_to_u32(vcdev->dma_area_addr);
+
+ return dma64_add(u64_to_dma64(dma_area_addr),
+ offsetof(struct vcdev_dma_area, indicators));
+}
+
+/* Spec stipulates a 64 bit address */
+static inline dma64_t indicators2_dma(struct virtio_ccw_device *vcdev)
+{
+ u64 dma_area_addr = dma32_to_u32(vcdev->dma_area_addr);
+
+ return dma64_add(u64_to_dma64(dma_area_addr),
+ offsetof(struct vcdev_dma_area, indicators2));
+}
+
+static inline dma32_t config_block_dma(struct virtio_ccw_device *vcdev)
+{
+ return dma32_add(vcdev->dma_area_addr,
+ offsetof(struct vcdev_dma_area, config_block));
+}
+
+static inline dma32_t status_dma(struct virtio_ccw_device *vcdev)
+{
+ return dma32_add(vcdev->dma_area_addr,
+ offsetof(struct vcdev_dma_area, status));
+}
+
struct vq_info_block_legacy {
- __u64 queue;
+ dma64_t queue;
__u32 align;
__u16 index;
__u16 num;
} __packed;
struct vq_info_block {
- __u64 desc;
+ dma64_t desc;
__u32 res0;
__u16 index;
__u16 num;
- __u64 avail;
- __u64 used;
+ dma64_t avail;
+ dma64_t used;
} __packed;
struct virtio_feature_desc {
@@ -106,8 +137,8 @@ struct virtio_feature_desc {
} __packed;
struct virtio_thinint_area {
- unsigned long summary_indicator;
- unsigned long indicator;
+ dma64_t summary_indicator;
+ dma64_t indicator;
u64 bit_nr;
u8 isc;
} __packed;
@@ -123,6 +154,7 @@ struct virtio_rev_info {
struct virtio_ccw_vq_info {
struct virtqueue *vq;
+ dma32_t info_block_addr;
int num;
union {
struct vq_info_block s;
@@ -156,6 +188,11 @@ static inline u8 *get_summary_indicator(struct airq_info *info)
return summary_indicators + info->summary_indicator_idx;
}
+static inline dma64_t get_summary_indicator_dma(struct airq_info *info)
+{
+ return virt_to_dma64(get_summary_indicator(info));
+}
+
#define CCW_CMD_SET_VQ 0x13
#define CCW_CMD_VDEV_RESET 0x33
#define CCW_CMD_SET_IND 0x43
@@ -260,12 +297,12 @@ static struct airq_info *new_airq_info(int index)
return info;
}
-static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
- u64 *first, void **airq_info)
+static unsigned long *get_airq_indicator(struct virtqueue *vqs[], int nvqs,
+ u64 *first, void **airq_info)
{
int i, j;
struct airq_info *info;
- unsigned long indicator_addr = 0;
+ unsigned long *indicator_addr = NULL;
unsigned long bit, flags;
for (i = 0; i < MAX_AIRQ_AREAS && !indicator_addr; i++) {
@@ -275,7 +312,7 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
info = airq_areas[i];
mutex_unlock(&airq_areas_lock);
if (!info)
- return 0;
+ return NULL;
write_lock_irqsave(&info->lock, flags);
bit = airq_iv_alloc(info->aiv, nvqs);
if (bit == -1UL) {
@@ -285,7 +322,7 @@ static unsigned long get_airq_indicator(struct virtqueue *vqs[], int nvqs,
}
*first = bit;
*airq_info = info;
- indicator_addr = (unsigned long)info->aiv->vector;
+ indicator_addr = info->aiv->vector;
for (j = 0; j < nvqs; j++) {
airq_iv_set_ptr(info->aiv, bit + j,
(unsigned long)vqs[j]);
@@ -348,31 +385,31 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
struct ccw1 *ccw)
{
int ret;
- unsigned long *indicatorp = NULL;
struct virtio_thinint_area *thinint_area = NULL;
struct airq_info *airq_info = vcdev->airq_info;
+ dma64_t *indicatorp = NULL;
if (vcdev->is_thinint) {
thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(*thinint_area));
+ sizeof(*thinint_area),
+ &ccw->cda);
if (!thinint_area)
return;
thinint_area->summary_indicator =
- (unsigned long) get_summary_indicator(airq_info);
+ get_summary_indicator_dma(airq_info);
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->count = sizeof(*thinint_area);
- ccw->cda = (__u32)virt_to_phys(thinint_area);
} else {
/* payload is the address of the indicators */
indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(indicators(vcdev)));
+ sizeof(*indicatorp),
+ &ccw->cda);
if (!indicatorp)
return;
*indicatorp = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
- ccw->count = sizeof(indicators(vcdev));
- ccw->cda = (__u32)virt_to_phys(indicatorp);
+ ccw->count = sizeof(*indicatorp);
}
/* Deregister indicators from host. */
*indicators(vcdev) = 0;
@@ -386,7 +423,7 @@ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
"Failed to deregister indicators (%d)\n", ret);
else if (vcdev->is_thinint)
virtio_ccw_drop_indicators(vcdev);
- ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(indicators(vcdev)));
+ ccw_device_dma_free(vcdev->cdev, indicatorp, sizeof(*indicatorp));
ccw_device_dma_free(vcdev->cdev, thinint_area, sizeof(*thinint_area));
}
@@ -426,7 +463,7 @@ static int virtio_ccw_read_vq_conf(struct virtio_ccw_device *vcdev,
ccw->cmd_code = CCW_CMD_READ_VQ_CONF;
ccw->flags = 0;
ccw->count = sizeof(struct vq_config_block);
- ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->config_block);
+ ccw->cda = config_block_dma(vcdev);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_VQ_CONF);
if (ret)
return ret;
@@ -463,7 +500,7 @@ static void virtio_ccw_del_vq(struct virtqueue *vq, struct ccw1 *ccw)
}
ccw->cmd_code = CCW_CMD_SET_VQ;
ccw->flags = 0;
- ccw->cda = (__u32)virt_to_phys(info->info_block);
+ ccw->cda = info->info_block_addr;
ret = ccw_io_helper(vcdev, ccw,
VIRTIO_CCW_DOING_SET_VQ | index);
/*
@@ -486,7 +523,7 @@ static void virtio_ccw_del_vqs(struct virtio_device *vdev)
struct ccw1 *ccw;
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return;
@@ -525,7 +562,8 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
goto out_err;
}
info->info_block = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(*info->info_block));
+ sizeof(*info->info_block),
+ &info->info_block_addr);
if (!info->info_block) {
dev_warn(&vcdev->cdev->dev, "no info block\n");
err = -ENOMEM;
@@ -556,22 +594,22 @@ static struct virtqueue *virtio_ccw_setup_vq(struct virtio_device *vdev,
/* Register it with the host. */
queue = virtqueue_get_desc_addr(vq);
if (vcdev->revision == 0) {
- info->info_block->l.queue = queue;
+ info->info_block->l.queue = u64_to_dma64(queue);
info->info_block->l.align = KVM_VIRTIO_CCW_RING_ALIGN;
info->info_block->l.index = i;
info->info_block->l.num = info->num;
ccw->count = sizeof(info->info_block->l);
} else {
- info->info_block->s.desc = queue;
+ info->info_block->s.desc = u64_to_dma64(queue);
info->info_block->s.index = i;
info->info_block->s.num = info->num;
- info->info_block->s.avail = (__u64)virtqueue_get_avail_addr(vq);
- info->info_block->s.used = (__u64)virtqueue_get_used_addr(vq);
+ info->info_block->s.avail = u64_to_dma64(virtqueue_get_avail_addr(vq));
+ info->info_block->s.used = u64_to_dma64(virtqueue_get_used_addr(vq));
ccw->count = sizeof(info->info_block->s);
}
ccw->cmd_code = CCW_CMD_SET_VQ;
ccw->flags = 0;
- ccw->cda = (__u32)virt_to_phys(info->info_block);
+ ccw->cda = info->info_block_addr;
err = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_VQ | i);
if (err) {
dev_warn(&vcdev->cdev->dev, "SET_VQ failed\n");
@@ -605,11 +643,12 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
{
int ret;
struct virtio_thinint_area *thinint_area = NULL;
- unsigned long indicator_addr;
+ unsigned long *indicator_addr;
struct airq_info *info;
thinint_area = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(*thinint_area));
+ sizeof(*thinint_area),
+ &ccw->cda);
if (!thinint_area) {
ret = -ENOMEM;
goto out;
@@ -622,15 +661,13 @@ static int virtio_ccw_register_adapter_ind(struct virtio_ccw_device *vcdev,
ret = -ENOSPC;
goto out;
}
- thinint_area->indicator = virt_to_phys((void *)indicator_addr);
+ thinint_area->indicator = virt_to_dma64(indicator_addr);
info = vcdev->airq_info;
- thinint_area->summary_indicator =
- virt_to_phys(get_summary_indicator(info));
+ thinint_area->summary_indicator = get_summary_indicator_dma(info);
thinint_area->isc = VIRTIO_AIRQ_ISC;
ccw->cmd_code = CCW_CMD_SET_IND_ADAPTER;
ccw->flags = CCW_FLAG_SLI;
ccw->count = sizeof(*thinint_area);
- ccw->cda = (__u32)virt_to_phys(thinint_area);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND_ADAPTER);
if (ret) {
if (ret == -EOPNOTSUPP) {
@@ -658,11 +695,11 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
struct irq_affinity *desc)
{
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
- unsigned long *indicatorp = NULL;
+ dma64_t *indicatorp = NULL;
int ret, i, queue_idx = 0;
struct ccw1 *ccw;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return -ENOMEM;
@@ -687,10 +724,11 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
* the address of the indicators.
*/
indicatorp = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(indicators(vcdev)));
+ sizeof(*indicatorp),
+ &ccw->cda);
if (!indicatorp)
goto out;
- *indicatorp = (unsigned long) indicators(vcdev);
+ *indicatorp = indicators_dma(vcdev);
if (vcdev->is_thinint) {
ret = virtio_ccw_register_adapter_ind(vcdev, vqs, nvqs, ccw);
if (ret)
@@ -702,32 +740,30 @@ static int virtio_ccw_find_vqs(struct virtio_device *vdev, unsigned nvqs,
*indicators(vcdev) = 0;
ccw->cmd_code = CCW_CMD_SET_IND;
ccw->flags = 0;
- ccw->count = sizeof(indicators(vcdev));
- ccw->cda = (__u32)virt_to_phys(indicatorp);
+ ccw->count = sizeof(*indicatorp);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_IND);
if (ret)
goto out;
}
/* Register indicators2 with host for config changes */
- *indicatorp = (unsigned long) indicators2(vcdev);
+ *indicatorp = indicators2_dma(vcdev);
*indicators2(vcdev) = 0;
ccw->cmd_code = CCW_CMD_SET_CONF_IND;
ccw->flags = 0;
- ccw->count = sizeof(indicators2(vcdev));
- ccw->cda = (__u32)virt_to_phys(indicatorp);
+ ccw->count = sizeof(*indicatorp);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_SET_CONF_IND);
if (ret)
goto out;
if (indicatorp)
ccw_device_dma_free(vcdev->cdev, indicatorp,
- sizeof(indicators(vcdev)));
+ sizeof(*indicatorp));
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return 0;
out:
if (indicatorp)
ccw_device_dma_free(vcdev->cdev, indicatorp,
- sizeof(indicators(vcdev)));
+ sizeof(*indicatorp));
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
virtio_ccw_del_vqs(vdev);
return ret;
@@ -738,7 +774,7 @@ static void virtio_ccw_reset(struct virtio_device *vdev)
struct virtio_ccw_device *vcdev = to_vc_device(vdev);
struct ccw1 *ccw;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return;
@@ -762,11 +798,12 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
u64 rc;
struct ccw1 *ccw;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return 0;
- features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
+ features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features),
+ &ccw->cda);
if (!features) {
rc = 0;
goto out_free;
@@ -776,7 +813,6 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
ccw->cmd_code = CCW_CMD_READ_FEAT;
ccw->flags = 0;
ccw->count = sizeof(*features);
- ccw->cda = (__u32)virt_to_phys(features);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
if (ret) {
rc = 0;
@@ -793,7 +829,6 @@ static u64 virtio_ccw_get_features(struct virtio_device *vdev)
ccw->cmd_code = CCW_CMD_READ_FEAT;
ccw->flags = 0;
ccw->count = sizeof(*features);
- ccw->cda = (__u32)virt_to_phys(features);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_FEAT);
if (ret == 0)
rc |= (u64)le32_to_cpu(features->features) << 32;
@@ -825,11 +860,12 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
return -EINVAL;
}
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return -ENOMEM;
- features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features));
+ features = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*features),
+ &ccw->cda);
if (!features) {
ret = -ENOMEM;
goto out_free;
@@ -846,7 +882,6 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
ccw->cmd_code = CCW_CMD_WRITE_FEAT;
ccw->flags = 0;
ccw->count = sizeof(*features);
- ccw->cda = (__u32)virt_to_phys(features);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
if (ret)
goto out_free;
@@ -860,7 +895,6 @@ static int virtio_ccw_finalize_features(struct virtio_device *vdev)
ccw->cmd_code = CCW_CMD_WRITE_FEAT;
ccw->flags = 0;
ccw->count = sizeof(*features);
- ccw->cda = (__u32)virt_to_phys(features);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_FEAT);
out_free:
@@ -879,12 +913,13 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
void *config_area;
unsigned long flags;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return;
config_area = ccw_device_dma_zalloc(vcdev->cdev,
- VIRTIO_CCW_CONFIG_SIZE);
+ VIRTIO_CCW_CONFIG_SIZE,
+ &ccw->cda);
if (!config_area)
goto out_free;
@@ -892,7 +927,6 @@ static void virtio_ccw_get_config(struct virtio_device *vdev,
ccw->cmd_code = CCW_CMD_READ_CONF;
ccw->flags = 0;
ccw->count = offset + len;
- ccw->cda = (__u32)virt_to_phys(config_area);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_CONFIG);
if (ret)
goto out_free;
@@ -919,12 +953,13 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
void *config_area;
unsigned long flags;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return;
config_area = ccw_device_dma_zalloc(vcdev->cdev,
- VIRTIO_CCW_CONFIG_SIZE);
+ VIRTIO_CCW_CONFIG_SIZE,
+ &ccw->cda);
if (!config_area)
goto out_free;
@@ -939,7 +974,6 @@ static void virtio_ccw_set_config(struct virtio_device *vdev,
ccw->cmd_code = CCW_CMD_WRITE_CONF;
ccw->flags = 0;
ccw->count = offset + len;
- ccw->cda = (__u32)virt_to_phys(config_area);
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_CONFIG);
out_free:
@@ -956,14 +990,14 @@ static u8 virtio_ccw_get_status(struct virtio_device *vdev)
if (vcdev->revision < 2)
return vcdev->dma_area->status;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return old_status;
ccw->cmd_code = CCW_CMD_READ_STATUS;
ccw->flags = 0;
ccw->count = sizeof(vcdev->dma_area->status);
- ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->status);
+ ccw->cda = status_dma(vcdev);
ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_READ_STATUS);
/*
* If the channel program failed (should only happen if the device
@@ -983,7 +1017,7 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
struct ccw1 *ccw;
int ret;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return;
@@ -992,11 +1026,11 @@ static void virtio_ccw_set_status(struct virtio_device *vdev, u8 status)
ccw->cmd_code = CCW_CMD_WRITE_STATUS;
ccw->flags = 0;
ccw->count = sizeof(status);
- ccw->cda = (__u32)virt_to_phys(&vcdev->dma_area->status);
/* We use ssch for setting the status which is a serializing
* instruction that guarantees the memory writes have
* completed before ssch.
*/
+ ccw->cda = status_dma(vcdev);
ret = ccw_io_helper(vcdev, ccw, VIRTIO_CCW_DOING_WRITE_STATUS);
/* Write failed? We assume status is unchanged. */
if (ret)
@@ -1278,10 +1312,10 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
struct ccw1 *ccw;
int ret;
- ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw));
+ ccw = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*ccw), NULL);
if (!ccw)
return -ENOMEM;
- rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev));
+ rev = ccw_device_dma_zalloc(vcdev->cdev, sizeof(*rev), &ccw->cda);
if (!rev) {
ccw_device_dma_free(vcdev->cdev, ccw, sizeof(*ccw));
return -ENOMEM;
@@ -1291,7 +1325,6 @@ static int virtio_ccw_set_transport_rev(struct virtio_ccw_device *vcdev)
ccw->cmd_code = CCW_CMD_SET_VIRTIO_REV;
ccw->flags = 0;
ccw->count = sizeof(*rev);
- ccw->cda = (__u32)virt_to_phys(rev);
vcdev->revision = VIRTIO_CCW_REV_MAX;
do {
@@ -1333,7 +1366,8 @@ static int virtio_ccw_online(struct ccw_device *cdev)
vcdev->vdev.dev.parent = &cdev->dev;
vcdev->cdev = cdev;
vcdev->dma_area = ccw_device_dma_zalloc(vcdev->cdev,
- sizeof(*vcdev->dma_area));
+ sizeof(*vcdev->dma_area),
+ &vcdev->dma_area_addr);
if (!vcdev->dma_area) {
ret = -ENOMEM;
goto out_free;